hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6772f47be90751a8ab2cbacfba1c7b99baa2b64a
| 102
|
py
|
Python
|
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
| 10.2
| 65
| 0.784314
| 14
| 102
| 5.571429
| 0.714286
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 102
| 9
| 66
| 11.333333
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
679841fb13e9e1b6f465dd6a052897627ff56964
| 40,992
|
py
|
Python
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 2
|
2021-11-26T07:08:43.000Z
|
2022-03-07T20:20:04.000Z
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 6
|
2019-05-27T22:05:58.000Z
|
2019-08-05T16:46:16.000Z
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 1
|
2019-03-29T18:26:16.000Z
|
2019-03-29T18:26:16.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datalabeling_v1beta1.proto import (
annotation_spec_set_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
data_labeling_service_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
dataset_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_job_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
instruction_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataLabelingServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString,
)
self.GetDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString,
)
self.ListDatasets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListDatasets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsResponse.FromString,
)
self.DeleteDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteDatasetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ImportData = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ImportData",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ImportDataRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ExportData = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ExportData",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ExportDataRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetDataItem = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetDataItem",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDataItemRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.DataItem.FromString,
)
self.ListDataItems = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListDataItems",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsResponse.FromString,
)
self.GetAnnotatedDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetAnnotatedDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotatedDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.AnnotatedDataset.FromString,
)
self.ListAnnotatedDatasets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListAnnotatedDatasets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsResponse.FromString,
)
self.DeleteAnnotatedDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteAnnotatedDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotatedDatasetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.LabelImage = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelImage",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelImageRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.LabelVideo = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelVideo",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelVideoRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.LabelText = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelText",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelTextRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetExample = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetExample",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetExampleRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Example.FromString,
)
self.ListExamples = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListExamples",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesResponse.FromString,
)
self.CreateAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.FromString,
)
self.GetAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.FromString,
)
self.ListAnnotationSpecSets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListAnnotationSpecSets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsResponse.FromString,
)
self.DeleteAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CreateInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateInstructionRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetInstructionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2.Instruction.FromString,
)
self.ListInstructions = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListInstructions",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsResponse.FromString,
)
self.DeleteInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteInstructionRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetEvaluation = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetEvaluation",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2.Evaluation.FromString,
)
self.SearchEvaluations = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/SearchEvaluations",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsResponse.FromString,
)
self.SearchExampleComparisons = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/SearchExampleComparisons",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsResponse.FromString,
)
self.CreateEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.UpdateEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/UpdateEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.UpdateEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.GetEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.PauseEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/PauseEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.PauseEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ResumeEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ResumeEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ResumeEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListEvaluationJobs = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListEvaluationJobs",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsResponse.FromString,
)
class DataLabelingServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def CreateDataset(self, request, context):
"""Creates dataset. If success return a Dataset resource.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetDataset(self, request, context):
"""Gets dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDatasets(self, request, context):
"""Lists datasets under a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteDataset(self, request, context):
"""Deletes a dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportData(self, request, context):
"""Imports data into dataset based on source locations defined in request.
It can be called multiple times for the same dataset. Each dataset can
only have one long running operation running on it. For example, no
labeling task (also long running operation) can be started while
importing is still ongoing. Vice versa.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExportData(self, request, context):
"""Exports data and annotations from dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetDataItem(self, request, context):
"""Gets a data item in a dataset by resource name. This API can be
called after data are imported into dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDataItems(self, request, context):
"""Lists data items in a dataset. This API can be called after data
are imported into dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAnnotatedDataset(self, request, context):
"""Gets an annotated dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListAnnotatedDatasets(self, request, context):
"""Lists annotated datasets for a dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteAnnotatedDataset(self, request, context):
"""Deletes an annotated dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelImage(self, request, context):
"""Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelVideo(self, request, context):
"""Starts a labeling task for video. The type of video labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelText(self, request, context):
"""Starts a labeling task for text. The type of text labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetExample(self, request, context):
"""Gets an example by resource name, including both data and annotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListExamples(self, request, context):
"""Lists examples in an annotated dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateAnnotationSpecSet(self, request, context):
"""Creates an annotation spec set by providing a set of labels.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAnnotationSpecSet(self, request, context):
"""Gets an annotation spec set by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListAnnotationSpecSets(self, request, context):
"""Lists annotation spec sets for a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteAnnotationSpecSet(self, request, context):
"""Deletes an annotation spec set by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateInstruction(self, request, context):
"""Creates an instruction for how data should be labeled.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetInstruction(self, request, context):
"""Gets an instruction by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListInstructions(self, request, context):
"""Lists instructions for a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteInstruction(self, request, context):
"""Deletes an instruction object by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetEvaluation(self, request, context):
"""Gets an evaluation by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SearchEvaluations(self, request, context):
"""Searchs evaluations within a project. Supported filter: evaluation_job,
evaluation_time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SearchExampleComparisons(self, request, context):
"""Searchs example comparisons in evaluation, in format of examples
of both ground truth and prediction(s). It is represented as a search with
evaluation id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateEvaluationJob(self, request, context):
"""Creates an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateEvaluationJob(self, request, context):
"""Updates an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetEvaluationJob(self, request, context):
"""Gets an evaluation job by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PauseEvaluationJob(self, request, context):
"""Pauses an evaluation job. Pausing a evaluation job that is already in
PAUSED state will be a no-op.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ResumeEvaluationJob(self, request, context):
"""Resumes a paused evaluation job. Deleted evaluation job can't be resumed.
Resuming a running evaluation job will be a no-op.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteEvaluationJob(self, request, context):
"""Stops and deletes an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListEvaluationJobs(self, request, context):
"""Lists all evaluation jobs within a project with possible filters.
Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_DataLabelingServiceServicer_to_server(servicer, server):
    """Registers the DataLabelingService handlers of ``servicer`` on ``server``.

    Every RPC in this service is unary-unary, and every request message is
    named ``<MethodName>Request`` in the data_labeling_service proto module,
    so the handler table is built from a (method name, response message)
    listing instead of 34 hand-written dictionary entries.
    """
    # Short local aliases for the generated protobuf modules.
    _svc = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2
    _dataset = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2
    _spec_set = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2
    _instruction = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2
    _evaluation = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2
    _eval_job = google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2
    _operation = google_dot_longrunning_dot_operations__pb2.Operation
    _empty = google_dot_protobuf_dot_empty__pb2.Empty
    # (method name, response message class), in the service's declaration
    # order so the handler dict keeps the original insertion order.
    _methods = [
        ("CreateDataset", _dataset.Dataset),
        ("GetDataset", _dataset.Dataset),
        ("ListDatasets", _svc.ListDatasetsResponse),
        ("DeleteDataset", _empty),
        ("ImportData", _operation),
        ("ExportData", _operation),
        ("GetDataItem", _dataset.DataItem),
        ("ListDataItems", _svc.ListDataItemsResponse),
        ("GetAnnotatedDataset", _dataset.AnnotatedDataset),
        ("ListAnnotatedDatasets", _svc.ListAnnotatedDatasetsResponse),
        ("DeleteAnnotatedDataset", _empty),
        ("LabelImage", _operation),
        ("LabelVideo", _operation),
        ("LabelText", _operation),
        ("GetExample", _dataset.Example),
        ("ListExamples", _svc.ListExamplesResponse),
        ("CreateAnnotationSpecSet", _spec_set.AnnotationSpecSet),
        ("GetAnnotationSpecSet", _spec_set.AnnotationSpecSet),
        ("ListAnnotationSpecSets", _svc.ListAnnotationSpecSetsResponse),
        ("DeleteAnnotationSpecSet", _empty),
        ("CreateInstruction", _operation),
        ("GetInstruction", _instruction.Instruction),
        ("ListInstructions", _svc.ListInstructionsResponse),
        ("DeleteInstruction", _empty),
        ("GetEvaluation", _evaluation.Evaluation),
        ("SearchEvaluations", _svc.SearchEvaluationsResponse),
        ("SearchExampleComparisons", _svc.SearchExampleComparisonsResponse),
        ("CreateEvaluationJob", _eval_job.EvaluationJob),
        ("UpdateEvaluationJob", _eval_job.EvaluationJob),
        ("GetEvaluationJob", _eval_job.EvaluationJob),
        ("PauseEvaluationJob", _empty),
        ("ResumeEvaluationJob", _empty),
        ("DeleteEvaluationJob", _empty),
        ("ListEvaluationJobs", _svc.ListEvaluationJobsResponse),
    ]
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=getattr(_svc, name + "Request").FromString,
            response_serializer=response.SerializeToString,
        )
        for name, response in _methods
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.datalabeling.v1beta1.DataLabelingService", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| 63.553488
| 169
| 0.782616
| 4,173
| 40,992
| 7.123173
| 0.064941
| 0.100353
| 0.054634
| 0.066341
| 0.82947
| 0.818066
| 0.813625
| 0.768646
| 0.684575
| 0.680875
| 0
| 0.013513
| 0.158714
| 40,992
| 644
| 170
| 63.652174
| 0.848431
| 0.069965
| 0
| 0.360078
| 1
| 0
| 0.12071
| 0.068671
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07045
| false
| 0.003914
| 0.031311
| 0
| 0.105675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6798bb647c9031d2653050d76cd3f241dd42a5cd
| 2,734
|
py
|
Python
|
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .certificate import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .get_certificate import *
from .get_pool import *
from .list_batch_account_keys import *
from .pool import *
from ._inputs import *
from . import outputs
# Make subpackages available:
# Static type checkers take the TYPE_CHECKING branch: each versioned
# subpackage is imported with a real `import ... as __vX` alias so the
# checker can resolve `pulumi_azure_native.batch.vYYYYMMDD` attributes.
# At runtime the else-branch substitutes lazy imports, deferring the cost
# of each submodule until its first attribute access.
# NOTE(review): generator-emitted file — keep the literal alias pattern;
# type checkers depend on it.
if typing.TYPE_CHECKING:
    import pulumi_azure_native.batch.v20151201 as __v20151201
    v20151201 = __v20151201
    import pulumi_azure_native.batch.v20170101 as __v20170101
    v20170101 = __v20170101
    import pulumi_azure_native.batch.v20170501 as __v20170501
    v20170501 = __v20170501
    import pulumi_azure_native.batch.v20170901 as __v20170901
    v20170901 = __v20170901
    import pulumi_azure_native.batch.v20181201 as __v20181201
    v20181201 = __v20181201
    import pulumi_azure_native.batch.v20190401 as __v20190401
    v20190401 = __v20190401
    import pulumi_azure_native.batch.v20190801 as __v20190801
    v20190801 = __v20190801
    import pulumi_azure_native.batch.v20200301 as __v20200301
    v20200301 = __v20200301
    import pulumi_azure_native.batch.v20200501 as __v20200501
    v20200501 = __v20200501
    import pulumi_azure_native.batch.v20200901 as __v20200901
    v20200901 = __v20200901
    import pulumi_azure_native.batch.v20210101 as __v20210101
    v20210101 = __v20210101
    import pulumi_azure_native.batch.v20210601 as __v20210601
    v20210601 = __v20210601
else:
    # Runtime path: lazy_import returns a module proxy that imports on use.
    v20151201 = _utilities.lazy_import('pulumi_azure_native.batch.v20151201')
    v20170101 = _utilities.lazy_import('pulumi_azure_native.batch.v20170101')
    v20170501 = _utilities.lazy_import('pulumi_azure_native.batch.v20170501')
    v20170901 = _utilities.lazy_import('pulumi_azure_native.batch.v20170901')
    v20181201 = _utilities.lazy_import('pulumi_azure_native.batch.v20181201')
    v20190401 = _utilities.lazy_import('pulumi_azure_native.batch.v20190401')
    v20190801 = _utilities.lazy_import('pulumi_azure_native.batch.v20190801')
    v20200301 = _utilities.lazy_import('pulumi_azure_native.batch.v20200301')
    v20200501 = _utilities.lazy_import('pulumi_azure_native.batch.v20200501')
    v20200901 = _utilities.lazy_import('pulumi_azure_native.batch.v20200901')
    v20210101 = _utilities.lazy_import('pulumi_azure_native.batch.v20210101')
    v20210601 = _utilities.lazy_import('pulumi_azure_native.batch.v20210601')
| 43.396825
| 80
| 0.793343
| 330
| 2,734
| 6.160606
| 0.206061
| 0.141663
| 0.200689
| 0.27152
| 0.513527
| 0.513527
| 0.242007
| 0
| 0
| 0
| 0
| 0.244595
| 0.137162
| 2,734
| 62
| 81
| 44.096774
| 0.617211
| 0.084492
| 0
| 0
| 1
| 0
| 0.168269
| 0.168269
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.740741
| 0
| 0.740741
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
679e2250d3e4704bdc0cc067419d5a8f3eb454fa
| 12,983
|
py
|
Python
|
python-numpy-lists/numpylists.py
|
tosinayanda/python-starter-kit
|
9faee168ff82e46b6ef8102ae72ea936fd099961
|
[
"MIT"
] | null | null | null |
python-numpy-lists/numpylists.py
|
tosinayanda/python-starter-kit
|
9faee168ff82e46b6ef8102ae72ea936fd099961
|
[
"MIT"
] | null | null | null |
python-numpy-lists/numpylists.py
|
tosinayanda/python-starter-kit
|
9faee168ff82e46b6ef8102ae72ea936fd099961
|
[
"MIT"
] | null | null | null |
#
import numpy as np
#create numpy arrays
#
#Generate array
# Draw 5000 synthetic (height, weight) samples from normal distributions,
# rounded to two decimals, then pair them column-wise into one 2-D array.
height = np.random.normal(1.75, 0.20, 5000).round(2)
weight = np.random.normal(60.32, 15, 5000).round(2)
np_city = np.column_stack((height, weight))
print(np_city.shape)
# Car makes as a numpy array of strings.
cars = ["Toyota", "Chevrolet", "Ford", "Honda", "Brabus"]
cars_np = np.array(cars)
weight=[20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23]
baseball=[[74, 180], [74, 215], [72, 210], [72, 210], [73, 188], [69, 176], [69, 209], [71, 200], [76, 231], [71, 180], [73, 188], [73, 180], [74, 185], [74, 160], [69, 180], [70, 185], [73, 189], [75, 185], [78, 219], [79, 230], [76, 205], [74, 230], [76, 195], [72, 180], [71, 192], [75, 225], [77, 203], [74, 195], [73, 182], [74, 188], [78, 200], [73, 180], [75, 200], [73, 200], [75, 245], [75, 240], [74, 215], [69, 185], [71, 175], [74, 199], [73, 200], [73, 215], [76, 200], [74, 205], [74, 206], [70, 186], [72, 188], [77, 220], [74, 210], [70, 195], [73, 200], [75, 200], [76, 212], [76, 224], [78, 210], [74, 205], [74, 220], [76, 195], [77, 200], [81, 260], [78, 228], [75, 270], [77, 200], [75, 210], [76, 190], [74, 220], [72, 180], [72, 205], [75, 210], [73, 220], [73, 211], [73, 200], [70, 180], [70, 190], [70, 170], [76, 230], [68, 155], [71, 185], [72, 185], [75, 200], [75, 225], [75, 225], [75, 220], [68, 160], [74, 205], [78, 235], [71, 250], [73, 210], [76, 190], [74, 160], [74, 200], [79, 205], [75, 222], [73, 195], [76, 205], [74, 220], [74, 220], [73, 170], [72, 185], [74, 195], [73, 220], [74, 230], [72, 180], [73, 220], [69, 180], [72, 180], [73, 170], [75, 210], [75, 215], [73, 200], [72, 213], [72, 180], [76, 192], [74, 235], [72, 185], [77, 235], [74, 210], [77, 222], [75, 210], [76, 230], [80, 220], [74, 180], [74, 190], [75, 200], [78, 210], [73, 194], [73, 180], [74, 190], [75, 240], [76, 200], [71, 198], [73, 200], [74, 195], [76, 210], [76, 220], [74, 190], [73, 210], [74, 225], [70, 180], [72, 185], [73, 170], [73, 185], [73, 185], [73, 180], [71, 178], [74, 175], [74, 200], [72, 204], [74, 211], [71, 190], [74, 210], [73, 190], [75, 190], [75, 185], [79, 290], [73, 175], [75, 185], [76, 200], [74, 220], [76, 170], [78, 220], [74, 190], [76, 220], [72, 205], [74, 200], [76, 250],
[74, 225], [75, 215], [78, 210], [75, 215], [72, 195], [74, 200], [72, 194], [74, 220], [70, 180], [71, 180], [70, 170], [75, 195], [71, 180], [71, 170], [73, 206], [72, 205], [71, 200], [73, 225], [72, 201], [75, 225], [74, 233], [74, 180], [75, 225], [73, 180], [77, 220], [73, 180], [76, 237], [75, 215], [74, 190], [76, 235], [75, 190], [73, 180], [71, 165], [76, 195], [75, 200], [72, 190], [71, 190], [77, 185], [73, 185], [74, 205], [71, 190], [72, 205], [74, 206], [75, 220], [73, 208], [72, 170], [75, 195], [75, 210], [74, 190], [72, 211], [74, 230], [71, 170], [70, 185], [74, 185], [77, 241], [77, 225], [75, 210], [75, 175], [78, 230], [75, 200], [76, 215], [73, 198], [75, 226], [75, 278], [79, 215], [77, 230], [76, 240], [71, 184], [75, 219], [74, 170], [69, 218], [71, 190], [76, 225], [72, 220], [72, 176], [70, 190], [72, 197], [73, 204], [71, 167], [72, 180], [71, 195], [73, 220], [72, 215], [73, 185], [74, 190], [74, 205], [72, 205], [75, 200], [74, 210], [74, 215], [77, 200], [75, 205], [73, 211], [72, 190], [71, 208], [74, 200], [77, 210], [75, 232], [75, 230], [75, 210], [78, 220], [78, 210], [74, 202], [76, 212], [78, 225], [76, 170], [70, 190], [72, 200], [80, 237], [74, 220], [74, 170], [71, 193], [70, 190], [72, 150], [71, 220], [74, 200], [71, 190], [72, 185], [71, 185], [74, 200], [69, 172], [76, 220], [75, 225], [75, 190], [76, 195], [73, 219], [76, 190], [73, 197], [77, 200], [73, 195], [72, 210], [72, 177], [77, 220], [77, 235], [71, 180], [74, 195], [74, 195], [73, 190], [78, 230], [75, 190], [73, 200], [70, 190], [74, 190], [72, 200], [73, 200], [73, 184], [75, 200], [75, 180], [74, 219], [76, 187], [73, 200], [74, 220], [75, 205], [75, 190], [72, 170], [73, 160], [73, 215], [72, 175], [74, 205], [78, 200], [76, 214], [73, 200], [74, 190], [75, 180], [70, 205], [75, 220], [71, 190], [72, 215], [78, 235], [75, 191], [73, 200], [73, 181], [71, 200], [75, 210], [77, 240], [72, 185], [69, 165], [73, 190], [74, 185], [72, 175], [70, 155], [75, 
210], [70, 170], [72, 175], [72, 220], [74, 210], [73, 205], [74, 200], [76, 205], [75, 195], [80, 240], [72, 150], [75, 200], [73, 215], [74, 202], [74, 200], [73, 190], [75, 205], [75, 190], [71, 160], [73, 215], [75, 185], [74, 200], [74, 190], [72, 210], [74, 185], [74, 220], [74, 190], [73, 202], [76, 205], [75, 220], [72, 175], [73, 160], [73, 190], [73, 200], [72, 229], [72, 206], [72, 220], [72, 180], [71, 195], [75, 175], [75, 188], [74, 230], [73, 190], [75, 200], [79, 190], [74, 219], [76, 235], [73, 180], [74, 180], [74, 180], [72, 200], [74, 234], [74, 185], [75, 220], [78, 223], [74, 200], [74, 210], [74, 200], [77, 210], [70, 190], [73, 177], [74, 227], [73, 180], [71, 195], [75, 199], [71, 175], [72, 185], [77, 240], [74, 210], [70, 180], [77, 194], [73, 225], [72, 180], [76, 205], [71, 193], [76, 230], [78, 230], [75, 220], [73, 200], [78, 249], [74, 190], [79, 208], [75, 245], [76, 250],
[72, 160], [75, 192], [75, 220], [70, 170], [72, 197], [70, 155], [74, 190], [71, 200], [76, 220], [73, 210], [76, 228], [71, 190], [69, 160], [72, 184], [72, 180], [69, 180], [73, 200], [69, 176], [73, 160], [74, 222], [74, 211], [72, 195], [71, 200], [72, 175], [72, 206], [76, 240], [76, 185], [76, 260], [74, 185], [76, 221], [75, 205], [71, 200], [72, 170], [71, 201], [73, 205], [75, 185], [76, 205], [75, 245], [71, 220], [75, 210], [74, 220], [72, 185], [73, 175], [73, 170], [73, 180], [73, 200], [76, 210], [72, 175], [76, 220], [73, 206], [73, 180], [73, 210], [75, 195], [75, 200], [77, 200], [73, 164], [72, 180], [75, 220], [70, 195], [74, 205], [72, 170], [80, 240], [71, 210], [71, 195], [74, 200], [74, 205], [73, 192], [75, 190], [76, 170], [73, 240], [77, 200], [72, 205], [73, 175], [77, 250], [76, 220], [71, 224], [75, 210], [73, 195], [74, 180], [77, 245], [71, 175], [72, 180], [73, 215], [69, 175], [73, 180], [70, 195], [74, 230], [76, 230], [73, 205], [73, 215], [75, 195], [73, 180], [79, 205], [74, 180], [73, 190], [74, 180], [77, 190], [75, 190], [74, 220], [73, 210], [77, 255], [73, 190], [77, 230], [74, 200], [74, 205], [73, 210], [77, 225], [74, 215], [77, 220], [75, 205], [77, 200], [75, 220], [71, 197], [74, 225], [70, 187], [79, 245], [72, 185], [72, 185], [70, 175], [74, 200], [74, 180], [72, 188], [73, 225], [72, 200], [74, 210], [74, 245], [76, 213], [82, 231], [74, 165], [74, 228], [70, 210], [73, 250], [73, 191], [74, 190], [77, 200], [72, 215], [76, 254], [73, 232], [73, 180], [72, 215], [74, 220], [74, 180], [71, 200], [72, 170], [75, 195], [74, 210], [74, 200], [77, 220], [70, 165], [71, 180], [73, 200], [76, 200], [71, 170], [75, 224], [74, 220], [72, 180], [76, 198], [79, 240], [76, 239], [73, 185], [76, 210], [78, 220], [75, 200], [76, 195], [72, 220], [72, 230], [73, 170], [73, 220], [75, 230], [71, 165], [76, 205], [70, 192], [75, 210], [74, 205], [75, 200], [73, 210], [71, 185], [71, 195], [72, 202], [73, 205], [73, 195], [72, 
180], [69, 200], [73, 185], [78, 240], [71, 185], [73, 220], [75, 205], [76, 205], [70, 180], [74, 201], [77, 190], [75, 208], [79, 240], [72, 180], [77, 230], [73, 195], [75, 215], [75, 190], [75, 195], [73, 215], [73, 215], [76, 220], [77, 220], [75, 230], [70, 195], [71, 190], [71, 195], [75, 209], [74, 204], [69, 170], [70, 185], [75, 205], [72, 175], [75, 210], [73, 190], [72, 180], [72, 180], [72, 160], [76, 235], [75, 200], [74, 210], [69, 180], [73, 190], [72, 197], [72, 203], [75, 205], [77, 170], [76, 200], [80, 250], [77, 200], [76, 220], [79, 200], [71, 190], [75, 170], [73, 190], [76, 220], [77, 215], [73, 206], [76, 215], [70, 185], [75, 235], [73, 188], [75, 230], [70, 195], [69, 168], [71, 190], [72, 160], [72, 200], [73, 200], [70, 189], [70, 180], [73, 190], [76, 200], [75, 220], [72, 187], [73, 240], [79, 190], [71, 180], [72, 185], [74, 210], [74, 220], [74, 219], [72, 190], [76, 193], [76, 175], [72, 180], [72, 215], [71, 210], [72, 200], [72, 190], [70, 185], [77, 220], [74, 170], [72, 195], [76, 205], [71, 195], [76, 210], [71, 190], [73, 190], [70, 180], [73, 220], [73, 190], [72, 186], [71, 185], [71, 190], [71, 180], [72, 190], [72, 170], [74, 210], [74, 240], [74, 220], [71, 180], [72, 210], [75, 210], [72, 195], [71, 160], [72, 180], [72, 205], [72, 200], [72, 185], [74, 245], [74, 190], [77, 210], [75, 200], [73, 200], [75, 222], [73, 215], [76, 240], [72, 170], [77, 220], [75, 156], [72, 190], [71, 202], [71, 221], [75, 200], [72, 190], [73, 210], [73, 190], [71, 200], [70, 165], [75, 190], [71, 185], [76, 230], [73, 208], [68, 209], [71, 175], [72, 180], [74, 200], [77, 205], [72, 200], [76, 250], [78, 210], [81, 230], [72, 244], [73, 202], [76, 240], [72, 200], [72, 215], [74, 177], [76, 210], [73, 170], [76, 215], [75, 217], [70, 198], [71, 200], [74, 220], [72, 170], [73, 200], [76, 230], [76, 231], [73, 183], [71, 192], [68, 167], [71, 190], [71, 180], [74, 180], [77, 215], [69, 160], [72, 205], [76, 223], [75, 175], [76, 170], 
[75, 190], [76, 240], [72, 175], [74, 230], [76, 223], [74, 196], [72, 167], [75, 195], [78, 190], [77, 250], [70, 190], [72, 190], [79, 190], [74, 170], [71, 160], [68, 150], [77, 225], [75, 220], [71, 209], [72, 210], [70, 176], [72, 260], [72, 195], [73, 190], [72, 184], [74, 180], [72, 195], [72, 195], [75, 219], [72, 225], [73, 212], [74, 202], [72, 185], [78, 200], [75, 209], [72, 200], [74, 195], [75, 228], [75, 210], [76, 190], [74, 212], [74, 190], [73, 218], [74, 220], [71, 190], [74, 235], [75, 210], [76, 200], [74, 188], [76, 210], [76, 235], [73, 188], [75, 215], [75, 216], [74, 220], [68, 180], [72, 185], [75, 200], [71, 210], [70, 220], [72, 185], [73, 231], [72, 210], [75, 195], [74, 200], [70, 205], [76, 200], [71, 190], [82, 250], [72, 185], [73, 180], [74, 170], [71, 180], [75, 208], [77, 235], [72, 215], [74, 244], [72, 220], [73, 185], [78, 230], [77, 190], [73, 200], [73, 180], [73, 190], [73, 196],
[73, 180], [76, 230], [75, 224], [70, 160], [73, 178], [72, 205], [73, 185], [75, 210], [74, 180], [73, 190], [73, 200], [76, 257], [73, 190], [75, 220], [70, 165], [77, 205], [72, 200], [77, 208], [74, 185], [75, 215], [75, 170], [75, 235], [75, 210], [72, 170],
[74, 180], [71, 170], [76, 190], [71, 150], [75, 230], [76, 203], [83, 260], [75, 246], [74, 186], [76, 210],
[72, 198], [72, 210], [75, 215], [75, 180], [72, 200], [77, 245], [73, 200], [72, 192], [70, 192], [74, 200], [72, 192],
[74, 205], [72, 190], [71, 186], [70, 170], [71, 197], [76, 219], [74, 200], [76, 220], [74, 207], [74, 225], [74, 207],
[75, 212], [75, 225], [71, 170], [71, 190], [74, 210], [77, 230], [71, 210], [74, 200], [75, 238], [77, 234], [76, 222],
[74, 200], [76, 190], [72, 170], [71, 220], [72, 223], [75, 210], [73, 215], [68, 196], [72, 175], [69, 175], [73, 189],
[73, 205], [75, 210], [70, 180], [70, 180], [74, 197], [75, 220], [74, 228], [74, 190], [73, 204], [74, 165], [75, 216],
[77, 220], [73, 208], [74, 210], [76, 215], [74, 195], [75, 200], [73, 215], [76, 229], [78, 240], [75, 207], [73, 205],
[77, 208], [74, 185], [72, 190], [74, 170], [72, 208], [71, 225], [73, 190], [75, 225], [73, 185], [67, 180], [67, 165],
[76, 240], [74, 220], [73, 212], [70, 163], [75, 215], [70, 175], [72, 205], [77, 210], [79, 205], [78, 208], [74, 215],
[75, 180], [75, 200], [78, 230], [76, 211], [75, 230], [69, 190], [75, 220], [72, 180], [75, 205], [73, 190], [74, 180],
[75, 205], [75, 190], [73, 195]]
# Boolean mask of weights under 21, then the matching light entries.
weight_np = np.array(weight)
light = weight_np < 21
lowweight = weight_np[light]
print(lowweight)

# One row per player: (height, weight).
np_baseball = np.array(baseball)
print(np_baseball.shape)

# Statistical operations on the height column (column 0).
heights = np_baseball[:, 0]

avg = np.mean(heights)
print("Average: " + str(avg))

med = np.median(heights)
print("Median: " + str(med))

stddev = np.std(heights)
print("Standard Deviation: " + str(stddev))

# Correlation matrix between height (column 0) and weight (column 1).
corr = np.corrcoef(heights, np_baseball[:, 1])
print("Correlation: " + str(corr))
| 177.849315
| 4,931
| 0.484942
| 2,401
| 12,983
| 2.61516
| 0.07122
| 0.110846
| 0.165313
| 0.219143
| 0.084727
| 0.062749
| 0.062749
| 0.062749
| 0.062749
| 0.062749
| 0
| 0.5123
| 0.173381
| 12,983
| 73
| 4,932
| 177.849315
| 0.072773
| 0.028037
| 0
| 0.073171
| 1
| 0
| 0.006347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.02439
| 0.170732
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67bfb2a09270657736e8e4b32cff8a3a6b09b92a
| 141
|
py
|
Python
|
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
# Package public API: load the backing module (presumably the compiled C
# extension behind the solvers — TODO confirm), then re-export the solver
# entry points from the `tsp_c` wrapper module.
from . import _tsp_c
from .tsp_c import solve_greedy
from .tsp_c import solve_SA
from .tsp_c import set_param_SA
from .tsp_c import solve_PSO
| 28.2
| 31
| 0.829787
| 29
| 141
| 3.655172
| 0.344828
| 0.188679
| 0.301887
| 0.528302
| 0.707547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134752
| 141
| 5
| 32
| 28.2
| 0.868852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
67d3514f1ace46de9127a9a4a21e892c7ad712e0
| 29,708
|
py
|
Python
|
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 10 18:30:46 2021
@author: fearthekraken
"""
# Third-party imports (numpy/matplotlib/seaborn/scipy were used below as
# np/plt/sns/stats but never imported — made explicit here)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

# Local analysis modules
import AS
import pwaves
import sleepy
#%%
### FIGURE 1C - example EEGs for NREM, IS, and REM ###
# Three 7-second EEG excerpts from the same recording, one per brain state.
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=721.5, tend=728.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # NREM EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=780.0, tend=787.0, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # IS EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=818.5, tend=825.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # REM EEG
#%%
### FIGURE 1E - example photometry recording ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=170, tend=2900, PLOT=['EEG', 'SP', 'EMG_AMP', 'HYPNO', 'DFF'], dff_nbin=1800,
                eeg_nbin=130, fmax=25, vm=[50,1800], highres=False, pnorm=0, psmooth=[2,5], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 1F - average DF/F signal in each brain state ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
# index [1] selects one of the recording groups listed in the .txt file
# -- presumably the experimental group; confirm against sleepy.load_recordings
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
df = AS.dff_activity(ppath, recordings, istate=[1,2,3,4], ma_thr=20, flatten_tnrem=4, ma_state=3)
#%%
### FIGURE 1G - example EEG theta burst & DF/F signal ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=2415, tend=2444, PLOT=['SP', 'DFF'], dff_nbin=450, fmax=20,
                vm=[0,5], highres=True, recalc_highres=False, nsr_seg=2.5, perc_overlap=0.8, pnorm=1, psmooth=[4,4])
#%%
### FIGURE 1H - average spectral field during REM ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
pwaves.spectralfield_highres_mice(ppath, recordings, pre=4, post=4, istate=[1], theta=[1,10,100,1000,10000], pnorm=1,
                                  psmooth=[6,1], fmax=25, nsr_seg=2, perc_overlap=0.8, recalc_highres=True)
#%%
### FIGURE 2B - recorded P-waveforms ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions'
# left - example LFP trace with P-waves
AS.plot_example(ppath, 'Fincher_040221n1', tstart=16112, tend=16119, PLOT=['LFP'], lfp_nbin=7, ylims=[(-0.4, 0.2)])
# right - average P-waveform
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
pwaves.avg_waveform(ppath, recordings, istate=[], win=[0.15,0.15], mode='pwaves', plaser=False, p_iso=0, pcluster=0, clus_event='waves')
#%%
### FIGURE 2C - average P-wave frequency in each brain state ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
# p_iso=0 / pcluster=0 -> include all P-waves (no isolation/cluster filtering)
istate = [1,2,3,4]; p_iso=0; pcluster=0
_,_,_,_ = pwaves.state_freq(ppath, recordings, istate, plotMode='03', ma_thr=20, flatten_tnrem=4, ma_state=3,
                            p_iso=p_iso, pcluster=pcluster, ylim2=[-0.3, 0.1])
#%%
### FIGURE 2D - time-normalized P-wave frequency across brain state transitions ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; vm=[0.2, 2.1] # NREM --> IS --> REM --> WAKE
_, mx_pwave, _ = pwaves.stateseq(ppath, recordings, sequence=sequence, nstates=nstates, state_thres=state_thres, ma_thr=20, ma_state=3,
                                 flatten_tnrem=4, fmax=25, pnorm=1, vm=vm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', print_stats=False)
#%%
### FIGURE 2E - example theta burst & P-waves ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/dreadds_processed/'
AS.plot_example(ppath, 'Scrabble_072420n1', tstart=11318.6, tend=11323, PLOT=['SP','EEG','LFP'], eeg_nbin=1, lfp_nbin=6, fmax=20,
                vm=[0,4.5], highres=True, recalc_highres=False, nsr_seg=1, perc_overlap=0.85, pnorm=1, psmooth=[4,5])
#%%
### FIGURE 2F - averaged spectral power surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
# pload/psave share this name so the collected data is cached between calls
filename = 'sp_win3'
# top - averaged spectrogram
pwaves.avg_SP(ppath, recordings, istate=[1], win=[-3,3], mouse_avg='mouse', plaser=False, pnorm=2, psmooth=[2,2], fmax=25,
              vm=[0.8,1.5], pload=filename, psave=filename)
# bottom - averaged high theta power
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], bands=[(8,15)], band_colors=['green'], win=[-3,3], mouse_avg='mouse',
                          plaser=False, pnorm=2, psmooth=0, ylim=[0.6,1.8], pload=filename, psave=filename)
#%%
### FIGURE 2H - example DF/F signal and P-waves ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'Fritz_032819n1', tstart=2991, tend=2996.75, PLOT=['DFF','LFP_THRES_ANNOT'], dff_nbin=50, lfp_nbin=10)
#%%
### FIGURE 2I - DF/F signal surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# top - diagrams of P-waveforms
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
p_iso=0.8; pcluster=0; clus_event='waves' # single P-waves
#p_iso=0; pcluster=0.1; clus_event='cluster start' # clustered P-waves
pwaves.avg_waveform(ppath, recordings, istate=[], win=[1,1], mode='pwaves', plaser=False, p_iso=p_iso,
                    pcluster=pcluster, clus_event=clus_event, wform_std=False)
# middle/bottom - heatmaps & average DF/F plots
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'pwaves_photometry.txt')[1]
# single P-waves
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
iso_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
                               base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
                               psmooth=(8,15), ds=1000, sf=1000)[0]
# clustered P-waves
pzscore=[2,2,2]; p_iso=0; pcluster=0.5; ylim=[-0.4,1.0]; vm=[-1,1.5]
clus_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
                                base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
                                psmooth=(4,15), ds=1000, sf=1000)[0]
# random points (jitter=10 shifts event times to build a null comparison)
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
jter_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
                                base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
                                psmooth=(8,15), ds=1000, sf=1000, jitter=10)[0]
#%%
### FIGURE 3B - example open loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
AS.plot_example(ppath, 'Huey_082719n1', tstart=12300, tend=14000, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[50,1800], highres=False,
                pnorm=0, psmooth=[2,2], flatten_tnrem=4, ma_thr=10)
#%%
### FIGURE 3C,D - percent time spent in each brain state surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
BS, t, df = AS.laser_brainstate(ppath, recordings, pre=400, post=520, flatten_tnrem=4, ma_state=3, ma_thr=20, edge=10, sf=0, ci='sem', ylim=[0,80])
#%%
### FIGURE 3E - averaged SPs and frequency band power surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
# frequency bands (Hz) and their plot labels/colors, index-aligned
bands=[(0.5,4), (6,10), (11,15), (55,99)]; band_labels=['delta', 'theta', 'sigma', 'gamma']; band_colors=['firebrick', 'limegreen', 'cyan', 'purple']
AS.laser_triggered_eeg_avg(ppath, recordings, pre=400, post=520, fmax=100, laser_dur=120, pnorm=1, psmooth=3, harmcs=10, iplt_level=2,
                           vm=[0.6,1.4], sf=7, bands=bands, band_labels=band_labels, band_colors=band_colors, ci=95, ylim=[0.6,1.3])
#%%
### FIGURE 3G - example closed loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
AS.plot_example(ppath, 'Cinderella_022420n1', tstart=7100, tend=10100, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[0,1500],
                highres=False, pnorm=0, psmooth=[2,3], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 3H - closed-loop ChR2 graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3I - eYFP controls for ChR2 ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3J - closed-loop iC++ graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3K - eYFP controls for iC++ ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 4B - example spontaneous & laser-triggered P-wave ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
AS.plot_example(ppath, 'Huey_101719n1', tstart=5925, tend=5930, PLOT=['LSR', 'EEG', 'LFP'], eeg_nbin=5, lfp_nbin=10)
#%%
### FIGURE 4C,D,E - waveforms & spectral power surrounding P-waves/laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
# top - averaged waveforms surrounding P-waves & laser
filename = 'wf_win025'; wform_win = [0.25,0.25]; istate=[1]
pwaves.avg_waveform(ppath, recordings, istate, mode='pwaves', win=wform_win, mouse_avg='trials', # spontaneous & laser-triggered P-waves
                    plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
pwaves.avg_waveform(ppath, recordings, istate, mode='lsr', win=wform_win, mouse_avg='trials', # successful & failed laser
                    plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
# middle - averaged SPs surrounding P-waves & laser
filename = 'sp_win3'; win=[-3,3]; pnorm=2
pwaves.avg_SP(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, post_stim=0.1, # spontaneous & laser-triggered P-waves
              mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.8,1.45)],
              fmax=25, recalc_highres=False, pload=filename, psave=filename)
pwaves.avg_SP(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, post_stim=0.1, # successful & failed laser
              mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.6,1.8)],
              fmax=25, recalc_highres=False, pload=filename, psave=filename)
# bottom - average high theta power surrounding P-waves & laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, # spontaneous & laser-triggered P-waves
                          post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
                          pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
# successful and failed laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, # successful & failed laser
                          post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
                          pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
#%%
### FIGURE 4F - spectral profiles: null vs spon vs success lsr vs fail lsr ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'sp_win3'
spon_win=[-0.5, 0.5]; lsr_win=[0,1]; collect_win=[-3,3]; frange=[0, 20]; pnorm=2; null=True; null_win=0; null_match='lsr'
df = pwaves.sp_profiles(ppath, recordings, spon_win=spon_win, lsr_win=lsr_win, collect_win=collect_win, frange=frange,
                        null=null, null_win=null_win, null_match=null_match, plaser=True, post_stim=0.1, pnorm=pnorm,
                        psmooth=12, mouse_avg='mouse', ci='sem', pload=filename, psave=filename)
#%%
### FIGURE 4G - probability of laser success per brainstate ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_stats'
df = pwaves.get_lsr_stats(ppath, recordings, istate=[1,2,3,4], lsr_jitter=5, post_stim=0.1,
                          flatten_tnrem=4, ma_thr=20, ma_state=3, psave=filename)
_ = pwaves.lsr_state_success(df, istate=[1,2,3,4]) # true laser success
_ = pwaves.lsr_state_success(df, istate=[1], jstate=[1]) # true vs sham laser success
#%%
### FIGURE 4H - latencies of elicited P-waves to laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
# reads the stats saved by the FIGURE 4G cell (psave='lsr_stats')
df = pd.read_pickle('lsr_stats.pkl')
pwaves.lsr_pwave_latency(df, istate=1, jitter=True)
#%%
### FIGURE 4I - phase preferences of spontaneous & laser-triggered P-waves ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_phases'
pwaves.lsr_hilbert(ppath, recordings, istate=1, bp_filt=[6,12], min_state_dur=30, stat='perc', mode='pwaves',
                   mouse_avg='trials', bins=9, pload=filename, psave=filename)
#%%
### FIGURE 5B,C - example recordings of hm3dq + saline vs cno ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'Dahl_030321n1', tstart=3960, tend=5210, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # saline
                fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
AS.plot_example(ppath, 'Dahl_031021n1', tstart=3620, tend=4870, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # CNO
                fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
#%%
### FIGURE 5D - hm3dq percent time spent in REM ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# c = control (saline) recordings; e = experimental recordings at CNO dose '0.25'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5E - hm3dq mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5F - hm3dq mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5G - hm3dq percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats - one paired t-test per non-REM state
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
for s in [2,3,4]:
    pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5H - hm3dq probability of IS-->REM transition ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df, 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5I - example P-waves during NREM-->IS-->REM transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'King_071020n1', ['HYPNO', 'EEG', 'LFP'], tstart=16097, tend=16172, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # saline
AS.plot_example(ppath, 'King_071520n1', ['HYPNO', 'EEG', 'LFP'], tstart=5600, tend=5675, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # CNO
#%%
### FIGURE 5J - hm3dq time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# c = saline recordings; e = experimental recordings at CNO dose '0.25'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
# keep only recordings whose P-wave channel is not flagged 'X'
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
                                vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
                                vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'blue'], group_labels=['saline', 'cno'],
                                 xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5K - hm3dq average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
# build the long-format frame in one shot; DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0, and append-in-loop was quadratic anyway
df = pd.concat([pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]})
                for i,state in enumerate(x)], ignore_index=True)
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='lightblue', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats - paired t-test (saline vs CNO) per brain state
for i,s in enumerate([1,2,3,4]):
    p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
    print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
#%%
### FIGURE 5L - hm4di percent time spent in REM ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# c = control (saline) recordings; e = experimental recordings at CNO dose '5'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5M - hm4di mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5N - hm4di mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5O - hm4di percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats - one paired t-test per non-REM state
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
for s in [2,3,4]:
    pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5P - hm4di probability of IS-->REM transition ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df, 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5Q - hm4di time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# c = saline recordings; e = experimental recordings at CNO dose '5'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
# keep only recordings whose P-wave channel is not flagged 'X'
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
                                vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
                                vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'red'], group_labels=['saline', 'cno'],
                                 xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5R - hm4di average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'red'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
# build the long-format frame in one shot; DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0, and append-in-loop was quadratic anyway
df = pd.concat([pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]})
                for i,state in enumerate(x)], ignore_index=True)
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='salmon', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats - paired t-test (saline vs CNO) per brain state
for i,s in enumerate([1,2,3,4]):
    p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
    print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
| 60.752556
| 150
| 0.660832
| 4,499
| 29,708
| 4.230051
| 0.1018
| 0.014345
| 0.033945
| 0.025432
| 0.826073
| 0.793547
| 0.774211
| 0.750775
| 0.728863
| 0.699753
| 0
| 0.058142
| 0.145483
| 29,708
| 489
| 151
| 60.752556
| 0.691523
| 0.131783
| 0
| 0.665468
| 0
| 0.007194
| 0.193735
| 0.096376
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014388
| 0
| 0.014388
| 0.07554
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67d3ce8adb8ddc67219cf049efed17f327e1aab1
| 42
|
py
|
Python
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 44
|
2016-12-12T17:37:23.000Z
|
2021-03-03T09:48:48.000Z
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 38
|
2017-01-09T14:45:45.000Z
|
2022-02-27T18:04:33.000Z
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 27
|
2017-02-02T22:49:31.000Z
|
2019-11-21T07:04:57.000Z
|
from .filter_service import FilterService
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
67e4a6a4b62a36140c3ec2606810cde8cf6567ae
| 8,164
|
py
|
Python
|
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | null | null | null |
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | null | null | null |
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T06:50:26.000Z
|
2021-03-05T06:50:26.000Z
|
import json
from typing import Any, Callable, Dict, Optional
import attr
from .interfaces import Event, Router
@attr.s(kw_only=True)
class SingleRoute(Router):
    """
    Router that unconditionally dispatches to one registered route.

    :param route: The single registered callable. Only set via ``add_route``.
    """

    route: Optional[Callable] = attr.ib(init=False, default=None)

    def add_route(self, *, fn: Callable) -> None:
        """
        Registers ``fn`` as the one and only route.

        :param fn: The callable to route to.
        :type fn: callable
        :raises ValueError: Raised when a single route has already been defined.
        """
        if self.route is None:
            self.route = fn
        else:
            raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")

    def get_route(self, *, event: Optional[Event]) -> Callable:
        """
        Returns the registered route, ignoring ``event``.

        :raises ValueError: Raised if no route is defined.
        :rtype: callable
        """
        registered = self.route
        if registered is None:
            raise ValueError("No route defined.")
        return registered

    def dispatch(self, *, event: Event) -> Any:
        """
        Looks up the registered route and invokes it with ``event``.

        :param event: The event to pass to the callable route.
        """
        return self.get_route(event=event)(event=event)
@attr.s(kw_only=True)
class EventField(Router):
    """
    Routes on the value of the specified top-level ``key`` in the
    given ``Event.raw`` dict.

    :param key: The name of the top-level key to look for when routing.
    :param routes: The routes mapping. Only set via ``add_route``
    """

    key: str = attr.ib(kw_only=True)
    routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)

    def add_route(self, *, fn: Callable, key: str) -> None:
        """
        Adds the route with the given key.

        :param fn: The callable to route to.
        :type fn: callable
        :param key: The key to associate the route with.
        :type key: str
        """
        # NOTE: silently replaces any route previously registered for this key.
        self.routes[key] = fn

    def get_route(self, *, event: Event) -> Callable:
        """
        Returns the matching route for the value of the ``key`` in the
        given event.

        :raises ValueError: Raised if no route is defined or routing key is
            not present in the event.
        :rtype: callable
        """
        field_value: str = event.raw.get(self.key, None)
        if field_value is None:
            raise ValueError(f"Routing key ({self.key}) not present in the event.")
        try:
            return self.routes[field_value]
        except KeyError as exc:
            # Chain the KeyError so the original lookup failure is preserved
            # in the traceback; callers still catch ValueError as before.
            raise ValueError(f"No route configured for given field ({field_value}).") from exc

    def dispatch(self, *, event: Event) -> Any:
        """
        Gets the configured route and invokes the callable.

        :param event: The event to pass to the callable route.
        """
        route = self.get_route(event=event)
        return route(event=event)
@attr.s(kw_only=True)
class SQSMessage:
    """
    Normalized view of a single raw SQS message record.

    :param meta: Message metadata (SQS ``attributes`` plus any remaining
        record-level fields).
    :param body: The JSON-decoded message body.
    :param key: The routing key extracted from the message attributes, or
        ``None`` when the record carried no such attribute.
    :param event: The originating event the record was taken from.
    """

    meta: Dict[str, Any] = attr.ib(factory=dict)
    body: Dict[str, Any] = attr.ib(factory=dict)
    key: Optional[str] = attr.ib()
    event: Event = attr.ib()

    @classmethod
    def from_raw_sqs_message(
        cls, *, raw_message: Dict[str, Any], key_name: Optional[str], event: Event
    ) -> "SQSMessage":
        """
        Builds an ``SQSMessage`` from a raw SQS record dict.

        :param raw_message: A single record from the Lambda event's
            ``Records`` list. Consumed destructively (keys are popped).
        :param key_name: The message-attribute name holding the routing key,
            or ``None`` when no routing key is expected.
        :param event: The originating event, carried through for context.
        :raises json.JSONDecodeError: If the record body is not valid JSON
            (including the empty-string default when ``body`` is absent).
        """
        meta = {}
        attributes = raw_message.pop("attributes", None)
        if attributes:
            meta.update(attributes)
        # Was a duplicated assignment (``body = body = ...``); single binding.
        body = raw_message.pop("body", "")
        message_attributes = raw_message.pop("messageAttributes", None)
        key = None
        if message_attributes:
            key_attribute = message_attributes.get(key_name, None)
            if key_attribute is not None:
                key = key_attribute["stringValue"]
        # Everything left on the record after the pops is kept as metadata.
        for k, value in raw_message.items():
            meta[k] = value
        # Attempt to decode json body.
        body = json.loads(body)
        return cls(meta=meta, body=body, key=key, event=event)
@attr.s(kw_only=True)
class SQSMessageField(Router):
    """
    Processes all message records in a given ``Event``, routing each based
    on the configured key.

    :param key: The name of the message-level key to look for when routing.
    :param routes: The routes mapping. Only set via ``add_route``
    """

    key: str = attr.ib(kw_only=True)
    routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)

    def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
        """Wraps a raw record in an ``SQSMessage``, extracting the routing key."""
        return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=self.key, event=event)

    def add_route(self, *, fn: Callable, key: str) -> None:
        """
        Adds the route with the given key.

        :param fn: The callable to route to.
        :type fn: callable
        :param key: The key to associate the route with.
        :type key: str
        """
        self.routes[key] = fn

    def get_route(self, *, message: SQSMessage) -> Callable:
        """
        Returns the matching route for the value of the ``key`` in the
        given message.

        :raises ValueError: Raised if no route is defined or routing key is
            not present in the message.
        :rtype: callable
        """
        field_value: Optional[str] = message.key
        if field_value is None:
            raise ValueError(f"Routing key ({self.key}) not present in the message.")
        try:
            return self.routes[field_value]
        except KeyError:
            raise ValueError(f"No route configured for given field ({field_value}).")

    def dispatch(self, *, event: Event) -> Any:
        """
        Iterates over all the message records in the given Event and executes the
        applicable callable as determined by the configured routes.

        :param event: The event to parse for messages.
        :raises ValueError: Raised when the event carries no ``Records`` list.
        """
        messages = event.raw.get("Records", None)
        if messages is None:
            raise ValueError("No messages present in Event.")
        for raw_message in messages:
            message = self._get_message(raw_message, event=event)
            route = self.get_route(message=message)
            # Process each message now.
            route(message=message)
        # SQS Lambdas don't return a value.
        return None
@attr.s(kw_only=True)
class GenericSQSMessage(Router):
    """
    Routes every message record in an ``Event`` to a single defined route
    without any conditions.

    :param route: The single defined route. Only set via ``add_route``.
    """

    route: Optional[Callable] = attr.ib(init=False, default=None)

    def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
        """Wraps a raw record in an ``SQSMessage``; no routing key is extracted."""
        return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=None, event=event)

    def add_route(self, *, fn: Callable) -> None:
        """
        Adds the single route.

        :param fn: The callable to route to.
        :type fn: callable
        :raises ValueError: Raised when a single route has already been defined.
        """
        if self.route is not None:
            # Error text previously named SingleRoute (copy-paste); corrected.
            raise ValueError("Single route is already defined. GenericSQSMessage can only have a single defined route.")
        self.route = fn

    def get_route(self, *, message: SQSMessage) -> Callable:
        """
        Returns the defined route; ``message`` is accepted for interface
        parity and is not consulted.

        :raises ValueError: Raised if no route is defined.
        :rtype: callable
        """
        if self.route is None:
            raise ValueError("No route defined.")
        return self.route

    def dispatch(self, *, event: Event) -> Any:
        """
        Routes every message record in the given Event to the single
        configured callable.

        :param event: The event to parse for messages.
        :raises ValueError: Raised when the event carries no ``Records`` list.
        """
        messages = event.raw.get("Records", None)
        if messages is None:
            raise ValueError("No messages present in Event.")
        for raw_message in messages:
            message = self._get_message(raw_message, event=event)
            route = self.get_route(message=message)
            # Process each message now.
            route(message=message)
        # SQS Lambdas don't return a value.
        return None
| 32.268775
| 114
| 0.614037
| 1,063
| 8,164
| 4.647225
| 0.125118
| 0.036437
| 0.030769
| 0.025506
| 0.806478
| 0.77834
| 0.760121
| 0.74919
| 0.739069
| 0.739069
| 0
| 0
| 0.289196
| 8,164
| 252
| 115
| 32.396825
| 0.851284
| 0.312837
| 0
| 0.638095
| 0
| 0
| 0.103828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.038095
| 0.019048
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
db3369b101ea183c503c1fa561b47c91b9100d56
| 36
|
py
|
Python
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 65
|
2020-04-29T01:06:01.000Z
|
2022-03-28T12:44:02.000Z
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 41
|
2020-04-20T16:09:07.000Z
|
2022-03-29T15:40:08.000Z
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 31
|
2020-04-27T18:04:06.000Z
|
2022-03-18T17:24:50.000Z
|
from . import datasets, radialcenter
| 36
| 36
| 0.833333
| 4
| 36
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1d6a7a8f00c138e84b26623fa12570b059d6d57
| 244
|
py
|
Python
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 95
|
2018-02-22T23:54:00.000Z
|
2021-04-17T03:39:21.000Z
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 840
|
2018-01-27T04:26:20.000Z
|
2021-01-24T12:28:58.000Z
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 100
|
2018-02-23T00:19:55.000Z
|
2020-08-28T07:59:31.000Z
|
from abc import ABC as Contract, abstractmethod
class AuthContract(Contract):
    """Interface that concrete auth classes are expected to implement."""

    @abstractmethod
    def user(self):
        """Abstract hook; no behavior is defined here."""

    @abstractmethod
    def save(self):
        """Abstract hook; no behavior is defined here."""

    @abstractmethod
    def delete(self):
        """Abstract hook; no behavior is defined here."""
| 14.352941
| 47
| 0.631148
| 25
| 244
| 6.16
| 0.56
| 0.331169
| 0.285714
| 0.324675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.303279
| 244
| 16
| 48
| 15.25
| 0.905882
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0.272727
| 0.090909
| 0
| 0.454545
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e1ff64213edb5548904c05273b193883e930a827
| 150
|
py
|
Python
|
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
from ..router import Router
from . import create_user, get_user
# Aggregate router: mounts each endpoint module's sub-router onto one object.
router = Router()
router.include(get_user.router)
router.include(create_user.router)
| 21.428571
| 35
| 0.8
| 22
| 150
| 5.272727
| 0.318182
| 0.258621
| 0.224138
| 0.327586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 150
| 6
| 36
| 25
| 0.859259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c06b4470ee6ba272de73e528bcb01060567707f9
| 142
|
py
|
Python
|
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | null | null | null |
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | 6
|
2020-06-06T01:27:17.000Z
|
2022-02-10T11:20:17.000Z
|
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | null | null | null |
from instanotifier.fetcher import tests
def run():
    """Script entry point; invoked via ``manage.py runscript`` (see comment below)."""
    # is executed when ran with 'manage.py runscript tests'
    tests.test_rss_fetcher()
| 20.285714
| 59
| 0.739437
| 20
| 142
| 5.15
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 142
| 6
| 60
| 23.666667
| 0.887931
| 0.373239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2200800f734e84798d40a112ef14379650a7d44d
| 145
|
py
|
Python
|
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
import os , sys
sys.path.append(os.getcwd())
import pytest
from typehints_checker import *
@pytest.mark.asyncio
async def test_import():
    # Import smoke test: body is intentionally empty; it passes iff the
    # star-import of the package at module load succeeded.
    ...
| 18.125
| 31
| 0.737931
| 21
| 145
| 5
| 0.714286
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 145
| 8
| 32
| 18.125
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22135b653bd172de4f59e045357620ffd83da98a
| 48
|
py
|
Python
|
echolect/millstone/__init__.py
|
ryanvolz/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T22:48:12.000Z
|
2022-03-24T22:48:12.000Z
|
echolect/millstone/__init__.py
|
scivision/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | 1
|
2015-03-25T20:41:24.000Z
|
2015-03-25T20:41:24.000Z
|
echolect/millstone/__init__.py
|
scivision/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | null | null | null |
from .read_hdf5 import *
from .hdf5_api import *
| 24
| 24
| 0.770833
| 8
| 48
| 4.375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 0.145833
| 48
| 2
| 25
| 24
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2231aae6662593f94c1874f0078bab296c0ac96f
| 2,104
|
py
|
Python
|
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
# CONFIG
seeds = [6598903756360202179, 2908409715321502665, 6126375328734039552, 1447957147463681860, 8611858271322161001, 1129180857020570158, 6362222119948958210, 7116573423379052515, 6183551438103583226, 4025455056998962241, 3253052445978017587, 8447055112402476503, 5958072666039141800, 704315598608973559, 1273141716491599966, 8030825590436937002, 6692768176035969914, 8405559442957414941, 5375803109627817298, 1491660193757141856, 3436611086188602011, 3271002097187013328, 4006294871837743001, 7473817498436254932, 7891796310200224764, 3130952787727334893, 697469171142516880, 133987617360269051, 1978176412643604703, 3541943493395593807, 5679145832406031548, 5942005640162452699, 5170695982942106620, 3168218038949114546, 9211443340810713278, 675545486074597116, 3672488441186673791, 6678020899892900267, 2416379871103035344, 8662874560817543122, 2122645477319220395, 2405200782555244715, 6145921643610737337, 5436563232962849112, 8616414727199277108, 3514934091557929937, 6828532625327352397, 4198622582999611227, 1404664771100695607, 2109913995355226572, 7499239331133290294, 1663854912663070382, 8773050872378084951, 847059168652279875, 2080440852605950627, 842456810578794799, 2969610112218411619, 8028963261673713765, 8849431138779094918, 6906452636298562639, 8279891918456160432, 3007521703390185509, 7384090506069372457, 2587992914778556505, 7951640286729988102, 812903075765965116, 4795333953396378316, 1140497104356211676, 8624839892588303806, 5867085452069993348, 8978621560802611959, 8687506047153117100, 1433098622112610322, 2329673189788559167, 1697681906179453583, 1151871187140419944, 7331838985682630168, 2010690807327394179, 8961362099735442061, 3782928183186245068, 8730275423842935904, 2250089307129376711, 6729072114456627667, 6426359511845339057, 1543504526754215874, 6764758859303816569, 438430728757175362, 850249168946095159, 7241624624529922339, 633139235530929889, 8443344843613690342, 5097223086273121, 3838826661110586915, 7425568686759148634, 5814866864074983273, 
5375799982976616117, 6540402714944055605, 448708351215739494, 5101380446889426970, 8035666378249198606]
| 701.333333
| 2,094
| 0.901616
| 102
| 2,104
| 18.598039
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.943
| 0.04943
| 2,104
| 2
| 2,095
| 1,052
| 0.0055
| 0.002852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
225438e5c2b8551e69ccb321df71b6704ae2b4d5
| 17
|
py
|
Python
|
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
# NOTE(review): the values 1, 2, 4 look like bit-flag constants (distinct
# powers of two) — confirm intended usage; names give no hint from here.
A = 1
B = 2
C = 4
| 5.666667
| 5
| 0.352941
| 6
| 17
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.470588
| 17
| 3
| 6
| 5.666667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2255fb2ff207d881f927e1b321a4dc62c8ca610a
| 17
|
py
|
Python
|
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | 2
|
2021-05-14T17:14:09.000Z
|
2021-06-13T21:35:04.000Z
|
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | null | null | null |
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | null | null | null |
from . import up
| 8.5
| 16
| 0.705882
| 3
| 17
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 17
| 1
| 17
| 17
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97fe866f84f325af30eccf7ed7f76920a2b9b84a
| 186
|
py
|
Python
|
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
from .errors import IncapBlocked, MaxRetriesExceeded, RecaptchaBlocked
from .parsers import ResourceParser, WebsiteResourceParser, IframeResourceParser
from .session import IncapSession
| 46.5
| 80
| 0.876344
| 16
| 186
| 10.1875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 186
| 3
| 81
| 62
| 0.958824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f01198a019097c1976dc940001aed540d4f3634
| 713
|
py
|
Python
|
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
from odc.aws import (
ec2_metadata,
ec2_current_region,
botocore_default_region,
auto_find_region,
make_s3_client,
s3_url_parse,
s3_fmt_range,
s3_ls,
s3_ls_dir,
s3_find,
get_boto_session,
get_creds_with_retry,
s3_fetch,
)
from odc.aws._find import (
s3_file_info,
norm_predicate,
parse_query,
)
__all__ = (
"ec2_metadata",
"ec2_current_region",
"botocore_default_region",
"auto_find_region",
"make_s3_client",
"s3_url_parse",
"s3_fmt_range",
"s3_ls",
"s3_ls_dir",
"s3_find",
"get_boto_session",
"get_creds_with_retry",
"s3_fetch",
"s3_file_info",
"norm_predicate",
"parse_query",
)
| 16.97619
| 30
| 0.647966
| 96
| 713
| 4.197917
| 0.354167
| 0.039702
| 0.049628
| 0.104218
| 0.903226
| 0.903226
| 0.903226
| 0.739454
| 0.739454
| 0.739454
| 0
| 0.037313
| 0.248247
| 713
| 41
| 31
| 17.390244
| 0.714552
| 0
| 0
| 0
| 0
| 0
| 0.293128
| 0.032258
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f02d35a7926f58cae17ffac0f474623fde43a2e
| 37,840
|
py
|
Python
|
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import hops
class lsp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/rsvp/igp-sync/link/lsp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_instance_id','__path_name','__cspf_enabled','__rro_enabled','__frr_enabled','__nbr_down_enabled','__link_count','__nbr_down_inprogress','__cspf_hop_count','__rro_hop_count','__hops',)
_yang_name = 'lsp'
_rest_name = 'lsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp']
def _get_lsp_name(self):
"""
Getter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string)
YANG Description: LSP name
"""
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
"""
Setter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_name() directly.
YANG Description: LSP name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_lsp_instance_id(self):
"""
Getter method for lsp_instance_id, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32)
YANG Description: Instance id of the lsp instance
"""
return self.__lsp_instance_id
def _set_lsp_instance_id(self, v, load=False):
"""
Setter method for lsp_instance_id, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_instance_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_instance_id() directly.
YANG Description: Instance id of the lsp instance
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_instance_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_instance_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_instance_id(self):
self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_path_name(self):
"""
Getter method for path_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/path_name (string)
YANG Description: LSP Path name
"""
return self.__path_name
  def _set_path_name(self, v, load=False):
    """
    Setter method for path_name, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/path_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_path_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_path_name() directly.
    YANG Description: LSP Path name
    Raises ValueError if v cannot be coerced to the YANG 'string' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above so
      # the error reports exactly how the value would have been built.
      raise ValueError({
          'error-string': """path_name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
        })
    self.__path_name = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_path_name(self):
    # Reset path_name to a fresh, empty YANG string leaf wrapper.
    self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
  def _get_cspf_enabled(self):
    """
    Getter method for cspf_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean)
    YANG Description: CSPF enabled for LSP
    Returns the YANGDynClass-wrapped leaf value (set via _set_cspf_enabled).
    """
    return self.__cspf_enabled
  def _set_cspf_enabled(self, v, load=False):
    """
    Setter method for cspf_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cspf_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cspf_enabled() directly.
    YANG Description: CSPF enabled for LSP
    Raises ValueError if v cannot be coerced to the YANG 'boolean' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """cspf_enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
        })
    self.__cspf_enabled = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_cspf_enabled(self):
    # Reset cspf_enabled to a fresh, empty YANG boolean leaf wrapper.
    self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
  def _get_rro_enabled(self):
    """
    Getter method for rro_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean)
    YANG Description: RRO enabled for LSP
    Returns the YANGDynClass-wrapped leaf value (set via _set_rro_enabled).
    """
    return self.__rro_enabled
  def _set_rro_enabled(self, v, load=False):
    """
    Setter method for rro_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rro_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rro_enabled() directly.
    YANG Description: RRO enabled for LSP
    Raises ValueError if v cannot be coerced to the YANG 'boolean' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """rro_enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
        })
    self.__rro_enabled = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_rro_enabled(self):
    # Reset rro_enabled to a fresh, empty YANG boolean leaf wrapper.
    self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
  def _get_frr_enabled(self):
    """
    Getter method for frr_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean)
    YANG Description: FRR enabled for LSP
    Returns the YANGDynClass-wrapped leaf value (set via _set_frr_enabled).
    """
    return self.__frr_enabled
  def _set_frr_enabled(self, v, load=False):
    """
    Setter method for frr_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_frr_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_frr_enabled() directly.
    YANG Description: FRR enabled for LSP
    Raises ValueError if v cannot be coerced to the YANG 'boolean' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """frr_enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
        })
    self.__frr_enabled = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_frr_enabled(self):
    # Reset frr_enabled to a fresh, empty YANG boolean leaf wrapper.
    self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
  def _get_nbr_down_enabled(self):
    """
    Getter method for nbr_down_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean)
    YANG Description: LSP Neighbour down is enabled
    Returns the YANGDynClass-wrapped leaf value (set via _set_nbr_down_enabled).
    """
    return self.__nbr_down_enabled
  def _set_nbr_down_enabled(self, v, load=False):
    """
    Setter method for nbr_down_enabled, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_nbr_down_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nbr_down_enabled() directly.
    YANG Description: LSP Neighbour down is enabled
    Raises ValueError if v cannot be coerced to the YANG 'boolean' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """nbr_down_enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
        })
    self.__nbr_down_enabled = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_nbr_down_enabled(self):
    # Reset nbr_down_enabled to a fresh, empty YANG boolean leaf wrapper.
    self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
  def _get_link_count(self):
    """
    Getter method for link_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32)
    YANG Description: Total links used by the LSP
    Returns the YANGDynClass-wrapped leaf value (set via _set_link_count).
    """
    return self.__link_count
  def _set_link_count(self, v, load=False):
    """
    Setter method for link_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_count() directly.
    YANG Description: Total links used by the LSP
    Raises ValueError if v is outside uint32 range or not coercible.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """link_count must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })
    self.__link_count = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_link_count(self):
    # Reset link_count to a fresh, empty YANG uint32 leaf wrapper.
    self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  def _get_nbr_down_inprogress(self):
    """
    Getter method for nbr_down_inprogress, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean)
    YANG Description: Neighbor down processing is in progress
    Returns the YANGDynClass-wrapped leaf value (set via _set_nbr_down_inprogress).
    """
    return self.__nbr_down_inprogress
  def _set_nbr_down_inprogress(self, v, load=False):
    """
    Setter method for nbr_down_inprogress, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_nbr_down_inprogress is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nbr_down_inprogress() directly.
    YANG Description: Neighbor down processing is in progress
    Raises ValueError if v cannot be coerced to the YANG 'boolean' type.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """nbr_down_inprogress must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
        })
    self.__nbr_down_inprogress = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_nbr_down_inprogress(self):
    # Reset nbr_down_inprogress to a fresh, empty YANG boolean leaf wrapper.
    self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
  def _get_cspf_hop_count(self):
    """
    Getter method for cspf_hop_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32)
    YANG Description: CSPF hop count
    Returns the YANGDynClass-wrapped leaf value (set via _set_cspf_hop_count).
    """
    return self.__cspf_hop_count
  def _set_cspf_hop_count(self, v, load=False):
    """
    Setter method for cspf_hop_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cspf_hop_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cspf_hop_count() directly.
    YANG Description: CSPF hop count
    Raises ValueError if v is outside uint32 range or not coercible.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """cspf_hop_count must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })
    self.__cspf_hop_count = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_cspf_hop_count(self):
    # Reset cspf_hop_count to a fresh, empty YANG uint32 leaf wrapper.
    self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  def _get_rro_hop_count(self):
    """
    Getter method for rro_hop_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32)
    YANG Description: RRO hop count
    Returns the YANGDynClass-wrapped leaf value (set via _set_rro_hop_count).
    """
    return self.__rro_hop_count
  def _set_rro_hop_count(self, v, load=False):
    """
    Setter method for rro_hop_count, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rro_hop_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rro_hop_count() directly.
    YANG Description: RRO hop count
    Raises ValueError if v is outside uint32 range or not coercible.
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this leaf's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """rro_hop_count must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })
    self.__rro_hop_count = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_rro_hop_count(self):
    # Reset rro_hop_count to a fresh, empty YANG uint32 leaf wrapper.
    self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  def _get_hops(self):
    """
    Getter method for hops, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/hops (list)
    YANG Description: MPLS Rsvp IGP Synchronization Hop information
    Returns the YANGListType-wrapped container (populated via _set_hops).
    """
    return self.__hops
  def _set_hops(self, v, load=False):
    """
    Setter method for hops, mapped from YANG variable
    /mpls_state/rsvp/igp_sync/link/lsp/hops (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_hops is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_hops() directly.
    YANG Description: MPLS Rsvp IGP Synchronization Hop information
    Raises ValueError if v is not compatible with the keyed list type
    (keys: 'index hop-type').
    """
    # Unwrap an already-typed pyangbind value back to its native type so it
    # can be re-validated against this list's own restrictions.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      # 'generated-type' deliberately mirrors the constructor call above.
      raise ValueError({
          'error-string': """hops must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
        })
    self.__hops = t
    # Notify the instance-level change hook, if one is installed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_hops(self):
    # Reset hops to a fresh, empty keyed YANG list container.
    self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
lsp_name = __builtin__.property(_get_lsp_name)
lsp_instance_id = __builtin__.property(_get_lsp_instance_id)
path_name = __builtin__.property(_get_path_name)
cspf_enabled = __builtin__.property(_get_cspf_enabled)
rro_enabled = __builtin__.property(_get_rro_enabled)
frr_enabled = __builtin__.property(_get_frr_enabled)
nbr_down_enabled = __builtin__.property(_get_nbr_down_enabled)
link_count = __builtin__.property(_get_link_count)
nbr_down_inprogress = __builtin__.property(_get_nbr_down_inprogress)
cspf_hop_count = __builtin__.property(_get_cspf_hop_count)
rro_hop_count = __builtin__.property(_get_rro_hop_count)
hops = __builtin__.property(_get_hops)
_pyangbind_elements = {'lsp_name': lsp_name, 'lsp_instance_id': lsp_instance_id, 'path_name': path_name, 'cspf_enabled': cspf_enabled, 'rro_enabled': rro_enabled, 'frr_enabled': frr_enabled, 'nbr_down_enabled': nbr_down_enabled, 'link_count': link_count, 'nbr_down_inprogress': nbr_down_inprogress, 'cspf_hop_count': cspf_hop_count, 'rro_hop_count': rro_hop_count, 'hops': hops, }
| 66.737213
| 754
| 0.742072
| 5,297
| 37,840
| 5.031527
| 0.039834
| 0.044274
| 0.05463
| 0.035119
| 0.905561
| 0.869916
| 0.847629
| 0.840687
| 0.820351
| 0.815549
| 0
| 0.008447
| 0.130233
| 37,840
| 566
| 755
| 66.855124
| 0.801349
| 0.181554
| 0
| 0.507886
| 0
| 0.037855
| 0.365698
| 0.19831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123028
| false
| 0
| 0.028391
| 0
| 0.26183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f15b4889cdf171226bf2916a6b9994712b58560
| 56,576
|
py
|
Python
|
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from psyneulink import PredictionErrorDeltaFunction
np.set_printoptions(suppress=True)
def test_prediction_error_delta_first_run():
    """Regression test for PredictionErrorDeltaFunction over 60 timesteps.

    Simulates TD-style learning: a stimulus signal turns on at t=41, a
    reward target spikes at t=54, and on each timestep the delta values
    produced by the function update the per-timestep weights.  The deltas
    computed at every timestep are then compared element-wise against a
    hard-coded 60x60 baseline captured from a known-good run.
    """
    learning_rate = 0.3
    # Stimulus is present (value 1) from index 41 onward.
    stimulus_onset = 41
    sample = np.zeros(60)
    sample[stimulus_onset:] = 1
    # Reward target is a single spike at index 54.
    reward_onset = 54
    target = np.zeros(60)
    target[reward_onset] = 1
    delta_function = PredictionErrorDeltaFunction()
    # delta_vals[t] holds the 60-element delta vector computed at timestep t.
    delta_vals = np.zeros((60, 60))
    weights = np.zeros(60)
    for t in range(60):
        print("Timestep {}".format(t))
        # Weighted stimulus is the function's prediction input this timestep.
        new_sample = sample * weights
        # print("sample = {}".format(new_sample))
        delta_vals[t] = delta_function.function(variable=[new_sample, target])
        print("delta: {}".format(delta_vals[t]))
        # Update each weight from the *next* timestep's delta, gated by the
        # stimulus.  NOTE(review): index 59 is never updated (range(59) with
        # delta_vals[t][i + 1]) — presumably intentional since the last delta
        # has no successor; confirm against the baseline-generating run.
        for i in range(59):
            weights[i] = weights[i] + learning_rate * sample[i] * \
                delta_vals[t][i + 1]
    # Expected deltas for each of the 60 timesteps (regression baseline).
    validation_array = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3,
        0.7, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09,
        0.42000000000000004, 0.49, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.027, 0.189,
        0.44100000000000006, 0.34299999999999997, 0.0,
        0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0081, 0.0756,
        0.2646, 0.4116, 0.24009999999999998, 0.0, 0.0,
        0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.00243, 0.02835, 0.1323,
        0.3087, 0.3601500000000001,
        0.16806999999999994, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0007289999999999999,
        0.010206, 0.05953499999999999, 0.18522,
        0.32413500000000006, 0.30252599999999996,
        0.117649, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.00021869999999999998, 0.0035721,
        0.025004699999999998, 0.09724049999999998,
        0.2268945, 0.31765230000000005,
        0.24706289999999997, 0.08235429999999999, 0.0,
        0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 6.560999999999999e-05,
        0.0012247199999999999, 0.01000188, 0.04667544,
        0.1361367, 0.25412184, 0.29647548,
        0.19765032000000005, 0.05764800999999997, 0.0,
        0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        1.9682999999999998e-05, 0.000413343,
        0.003857868, 0.021003947999999998,
        0.073513818, 0.171532242, 0.26682793199999993,
        0.2668279320000001, 0.15564962699999996,
        0.040353607000000014, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        5.904899999999999e-06, 0.000137781,
        0.0014467005, 0.009001692,
        0.036756909000000004, 0.1029193452,
        0.200120949, 0.26682793199999993,
        0.2334744405000001, 0.12106082099999993,
        0.028247524900000043, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        1.7714699999999997e-06,
        4.5467729999999994e-05, 0.00053045685,
        0.0037131979500000002, 0.0173282571,
        0.05660563986, 0.13207982633999998,
        0.2201330439, 0.25682188454999993,
        0.19975035465000013, 0.09321683216999987,
        0.019773267430000074, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        5.314409999999999e-07, 1.4880347999999997e-05,
        0.00019096446599999996, 0.00148527918,
        0.0077977156950000005, 0.029111471928000003,
        0.07924789580399999, 0.15849579160799998,
        0.23113969609499996, 0.23970042558000004,
        0.16779029790600009, 0.07118376274799987,
        0.013841287201000085, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        1.5943229999999994e-07, 4.8361131e-06,
        6.770558339999998e-05, 0.0005792588802,
        0.0033790101345, 0.014191842564900003,
        0.044152399090799994, 0.1030222645452,
        0.18028896295409996, 0.23370791494049992,
        0.21812738727780012, 0.1388083373586,
        0.05398102008389993, 0.009688901040700082,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.61026623e-06,
        2.3696954189999994e-05, 0.00022117157244,
        0.00141918425649, 0.006622859863620001,
        0.023180009522670002, 0.06181335872711999,
        0.12620227406787, 0.19631464855001995,
        0.22903375664168996, 0.19433167230204007,
        0.11336014217619006, 0.040693384370939945,
        0.006782230728490046, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        8.719352486999997e-06, 8.2939339665e-05,
        0.000580575377655, 0.002980286938629,
        0.011590004761335003, 0.034770014284004995,
        0.08113003332934499, 0.14723598641251498,
        0.20613038097752096, 0.218623131339795,
        0.1700402132642851, 0.09156011483461501,
        0.03052003827820493, 0.0047475615099430435,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        3.3601154386499996e-05,
        0.00023223015106199995, 0.0013004888459472001,
        0.0055632022854408, 0.018544007618136,
        0.048678019997607, 0.100961819254296,
        0.16490430478201676, 0.20987820608620322,
        0.20404825591714193, 0.1464961837353841,
        0.07324809186769199, 0.02278829524772641,
        0.0033232930569601082, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.00010327019970509998, 0.0005527077595275599,
        0.0025793028777952804, 0.00945744388524936,
        0.0275842113319773, 0.0643631597746137,
        0.12014456491261222, 0.17839647517327273,
        0.2081292210354848, 0.18678263426261454,
        0.12452175617507655, 0.05811015288170229,
        0.016948794590496474, 0.002326305139872087,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.00026908252756336796, 0.0011606862950078762,
        0.004642745180031503, 0.014895474119267742,
        0.038617895864768215, 0.08109758131601326,
        0.13762013799081035, 0.18731629893193635,
        0.2017252450036237, 0.1681043708363532,
        0.10459827518706422, 0.045761745394340525,
        0.012562047755309225, 0.0016284135979104386,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0006172884160657308, 0.002205303960514964,
        0.007718563861802375, 0.022012200642917885,
        0.05136180150014173, 0.09805434831845238,
        0.15252898627314818, 0.1916389827534426,
        0.1916389827534425, 0.1490525421415665,
        0.08694731624924712, 0.03580183610263121,
        0.009281957508089578, 0.0011398895185372737,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.00127887960422022,
        0.0038592819309011877, 0.012006654896137028,
        0.030817080900085034, 0.06536956554563493,
        0.11439673970486111, 0.1642619852172365,
        0.1916389827534426, 0.1788630505698796,
        0.13042097437387068, 0.07160367220526243,
        0.027845872524268733, 0.006839337111223864,
        0.0007979226629760694, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0024366641834905763, 0.006303493820471939,
        0.01764978269732143, 0.04118282629375,
        0.08007771779340278, 0.12935631335857373,
        0.17247508447809834, 0.1878062030983737,
        0.16433042771107698, 0.11277578372328811,
        0.058476332300964384, 0.021543911900355206,
        0.005026912776749604, 0.0005585458640832153,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0043277123296321576, 0.009707380483526788,
        0.024709695776250002, 0.05285129374364584,
        0.09486129646295406, 0.1422919446944311,
        0.17707442006418095, 0.18076347048218466,
        0.1488640345147404, 0.09648594829659096,
        0.047396606180781564, 0.01658881216327357,
        0.0036864027029497315, 0.0003909821048582174,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.007239926474690194,
        0.014208075071343751, 0.03315217516646875,
        0.06545429455943831, 0.10909049093239716,
        0.15272668730535605, 0.17818113518958212,
        0.17119363969195134, 0.13315060864929562,
        0.08175914566184805, 0.03815426797552923,
        0.012718089325176374, 0.0026977765235223217,
        0.00027368747340072996, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.011502348996093318,
        0.01989130509988125, 0.04284281098435962,
        0.07854515347132598, 0.1221813498442848,
        0.16036302167062388, 0.17608488654029292,
        0.15978073037915452, 0.11773316975306136,
        0.0686776823559524, 0.030523414380423386,
        0.009711995484680158, 0.0019705498084858775,
        0.00019158123138052208, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.017469740526057695,
        0.02677675686522476, 0.053553513730449524,
        0.09163601238321362, 0.13363585139218653,
        0.1650795811315246, 0.17119363969195145,
        0.14716646219132656, 0.10301652353392865,
        0.05723140196329368, 0.02427998871170045,
        0.0073895617818218184, 0.0014368592353543042,
        0.00013410686196635435, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.02550276758562512,
        0.03480978392479219, 0.06497826332627876,
        0.10423596408590546, 0.14306897031398796,
        0.16691379869965267, 0.16398548644176403,
        0.13392148059410713, 0.08928098706273813,
        0.0473459779878157, 0.01921286063273686,
        0.005603751017881575, 0.0010460335233379858,
        9.387480337641474e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.035945702763062776,
        0.04386032774523817, 0.07675557355416678,
        0.1158858659543302, 0.1502224188296874,
        0.16603530502228608, 0.15496628468746698,
        0.12052933253469633, 0.07670048434026144,
        0.03890604278129206, 0.015130127748280264,
        0.004236435769518487, 0.0007603859073495034,
        6.571236236352362e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.04910380108663422,
        0.05372890148791675, 0.0884946612742158,
        0.12618683181693738, 0.15496628468746698,
        0.16271459892184037, 0.1446351990416358,
        0.10738067807636587, 0.06536215187257055,
        0.0317732682713886, 0.011862020154651653,
        0.0031936208108678255, 0.0005519838438536873,
        4.5998653654510946e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.06522247153300925,
        0.06415862942380646, 0.09980231243703228,
        0.13482066767809628, 0.157290778957779,
        0.157290778957779, 0.13345884275205488,
        0.09477512021522716, 0.05528548679221601,
        0.025799893836367493, 0.009261500351516516,
        0.002401129720763562, 0.000400188286794001,
        3.219905755813546e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.08447006036015119,
        0.07485173432777421, 0.1103078190093515,
        0.14156170106200106, 0.157290778957779,
        0.15014119809606175, 0.12185372599100663,
        0.08292823018832374, 0.04643980890546151,
        0.02083837579091219, 0.007203389162290574,
        0.0018008472905727269, 0.0002897915180232191,
        2.2539340290728127e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.10692558065848345,
        0.0854885597322474, 0.11968398362514637,
        0.1462804244307344, 0.15514590469926381,
        0.14165495646454518, 0.11017607725020184,
        0.07198170380346502, 0.038759378971096714,
        0.016747879802325727, 0.005582626600775242,
        0.0013475305588078745, 0.00020961586470347182,
        1.5777538203476382e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.13257214857815766,
        0.0957471869001171, 0.12766291586682277,
        0.14894006851129327, 0.15109862022884823,
        0.13221129270024212, 0.09871776521618081,
        0.062015006353754565, 0.032155929220465396,
        0.013398303841860582, 0.0043120977881849765,
        0.0010061561505766425, 0.00015146436675339547,
        1.1044276742477876e-05, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.16129630464819278,
        0.10532190559012883, 0.13404606166016392,
        0.14958763402655972, 0.1454324219702664,
        0.12216323445502375, 0.08770693755745296,
        0.05305728321376779, 0.02652864160688395,
        0.0106724420257579, 0.003320315296902465,
        0.0007497486154296462, 0.0001093383397501313,
        7.730993719756718e-06, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.19289287632523142,
        0.11393915241113936, 0.13870853337008265,
        0.1483410704096717, 0.13845166571569367,
        0.11182634538575253, 0.07731204125434732,
        0.045098690731702695, 0.021771781732546125,
        0.008466804007101203, 0.0025491452924606417,
        0.0005576255327258695, 7.885613594094121e-05,
        5.411695603863009e-06, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.22707462204857323,
        0.12136996669882236, 0.14159829448195937,
        0.14537424900147833, 0.1304640696167113,
        0.10147205414633098, 0.06764803609755388,
        0.038100618031955746, 0.017780288414912637,
        0.00669150639270899, 0.0019516893645402655,
        0.0004139947136904132, 5.682280383978444e-05,
        3.7881869227041065e-06, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.26348561205821996,
        0.12743846503376344, 0.14273108083781505,
        0.14090119518604827, 0.12176646497559718,
        0.09132484873169788, 0.058783810677874415,
        0.03200451914684277, 0.014453653808251588,
        0.005269561284258373, 0.001490380969285332,
        0.00030684314073514685, 4.091241876469365e-05,
        2.6517308459039768e-06, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.301717151568349,
        0.1320262497749789, 0.14218211514228507,
        0.13516077612291288, 0.11263398010242742,
        0.08156253731555085, 0.05075002321856492,
        0.026739259545265348, 0.011698426051053645,
        0.004135807189766472, 0.001135319620720332,
        0.00022706392414395538,
        2.9434212389101155e-05, 1.856211592099477e-06,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.3413250265008427,
        0.13507300938517075, 0.14007571343647335,
        0.12840273731676732, 0.10331254726636441,
        0.07231878308645512, 0.04354679411657503,
        0.02222700949700185, 0.009429640392667471,
        0.0032356609190525853, 0.0008628429117474301,
        0.000167775010617488, 2.116081215008947e-05,
        1.2993481144363273e-06, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.3818469293163939,
        0.1365738206005615, 0.13657382060056156,
        0.12087568030164642, 0.09401441801239163,
        0.06368718639549109, 0.03715085873070312,
        0.018387798765701513, 0.00757144655058295,
        0.0025238155168610943, 0.0006543225414084031,
        0.00012379075107726845,
        1.5202372939393527e-05, 9.09543680149838e-07,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.4228190754965624,
        0.13657382060056145, 0.13186437851088706,
        0.11281730161487002, 0.08491624852732138,
        0.05572628809605473, 0.031521940741202625,
        0.01514289310116601, 0.006057157240466293,
        0.0019629676242253202, 0.0004951630043090738,
        9.121423763591707e-05, 1.0914524161576011e-05,
        6.366805761492955e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.4637912216767308,
        0.13516098797365916, 0.12615025544208192,
        0.10444698568860544, 0.07615926039794141,
        0.04846498388959908, 0.026608226449191696,
        0.012417172342956029, 0.0048289003555940235,
        0.0015226262382503908, 0.0003739783743071934,
        6.712432359357035e-05, 7.831171085936894e-06,
        4.4567640333781355e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.5043395180688286,
        0.13245776821418598, 0.11963927451603895,
        0.09596066810140624, 0.06785097744543866,
        0.04190795665747693, 0.02235091021732094,
        0.010140690746747505, 0.003837018120390834,
        0.0011780318790675093, 0.00028192215909306206,
        4.933637784132472e-05, 5.6155226810794545e-06,
        3.119734823808784e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.5440768485330844,
        0.12861222010474194, 0.11253569259164908,
        0.087527760904616, 0.06006807120905011,
        0.03604084272543018, 0.0186878443761489,
        0.008249588958840537, 0.0030393222479937476,
        0.0009091989630751751, 0.00021214642471756306,
        3.6220121293228935e-05, 4.024457921469882e-06,
        2.183814377110238e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.582660514564507,
        0.12378926185081407, 0.1050333130855392,
        0.07928985399594612, 0.05285990266396423,
        0.030834943220645727, 0.015556367750956368,
        0.006686508945586533, 0.0024002852625182314,
        0.0007000832015678915, 0.00015936853369025172,
        2.6561422281634606e-05,
        2.8826349763866332e-06,
        1.5286700638661443e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.6197972931197512,
        0.11816247722123163, 0.09731027535866121,
        0.0713608685963516, 0.04625241483096865,
        0.02625137057973892, 0.012895410109345473,
        0.005400641840666021, 0.0018902246442331627,
        0.0005378688012045441, 0.00011952640026768879,
        1.9457786090026907e-05,
        2.0637045854421388e-06,
        1.0700690444842564e-07, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.6552460362861207,
        0.1119068166624605, 0.08952545332996831,
        0.0638283324667368, 0.04025210155559966,
        0.02224458243862093, 0.010646979628741615,
        0.004347516681736163, 0.0014845178913245327,
        0.000412366080923543, 8.950581601441243e-05,
        1.4239561638595966e-05,
        1.4766952811662293e-06, 7.490483311389795e-08,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.6888180812848588,
        0.10519240766271287, 0.0818163170709989,
        0.05675546319339564, 0.03484984582050599,
        0.018765301595657147, 0.008757140744639957,
        0.003488617044612674, 0.0011628723482043357,
        0.0003155080014507483, 6.69259397017008e-05,
        1.0410701731355942e-05,
        1.0561581467172232e-06,
        5.2433383190830796e-08, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.7203758035836726,
        0.0981795804851987, 0.07429806090771796,
        0.05018377798152873, 0.030024482553051346,
        0.01576285334035199, 0.007176583634631806,
        0.0027908936356900726, 0.0009086630441783594,
        0.00024093338292596744, 4.997136831064175e-05,
        7.604338655986531e-06, 7.550407176148966e-07,
        3.670336823358156e-08, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.7498296777292321,
        0.09101512461195449, 0.06706377602986124,
        0.04413598935298546, 0.025745993789241584,
        0.013186972428635979, 0.005860876634949275,
        0.002226224458236503, 0.0007083441458026751,
        0.00018364477854138084, 3.726125941427849e-05,
        5.549549274452836e-06, 5.395395127338887e-07,
        2.569235779681378e-08, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.7771342151128184,
        0.08382972003732658, 0.06018544002679849,
        0.03861899068386232, 0.02197828738105989,
        0.010989143690529946, 0.004770480981935443,
        0.0017708603645063548, 0.0005509343356242535,
        0.00013972972280329454,
        2.7747746372375204e-05, 4.046546345892743e-06,
        3.853853662860729e-07, 1.7984650435565186e-08,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8022831311240164,
        0.07673643603416813, 0.05371550522391766,
        0.033626779693021636, 0.018681544273900896,
        0.009123544877951528, 0.0038705947967068166,
        0.0014048825558417022, 0.00042757295177797694,
        0.00010613512987400764,
        2.0637386364374954e-05,
        2.9481980520218443e-06,
        2.7516515155312504e-07, 1.258925530489563e-08,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8253040619342669,
        0.06983015679109295, 0.04768888756464884,
        0.029143209067285403, 0.015814144455116086,
        0.0075476598535781925, 0.00313088112444726,
        0.0011116896746226068, 0.00033114160520675284,
        8.048580682107342e-05, 1.5330629870691226e-05,
        2.146288181847922e-06, 1.9639238268975845e-07,
        8.812478746733632e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8462531089715948,
        0.06318777602315973, 0.04212518401543974,
        0.025144489683634585, 0.013334199074654718,
        0.006222626234838935, 0.002525123689499864,
        0.0008775252537979172, 0.0002559448656910268,
        6.0939253735958765e-05,
        1.1375327364060439e-05,
        1.5613194420671661e-06, 1.401184115401577e-07,
        6.168735078304621e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8652094417785428,
        0.0568689984208437, 0.037030975715898196,
        0.02160140250094056, 0.011200727222710039,
        0.005113375471237247, 0.002030844158789291,
        0.0006910511373657835, 0.000197443182104462,
        4.607007582446698e-05, 8.431124987495764e-06,
        1.1349591328979614e-06, 9.993350857939731e-08,
        4.3181145326087744e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8822701413047959,
        0.05091759160936005, 0.03240210375141095,
        0.018481199917471325, 0.009374521697268268,
        0.004188616077502871, 0.0016289062523622277,
        0.0005429687507872982, 0.0001520312502205634,
        3.4778390573309004e-05, 6.242275231160832e-06,
        8.244514455579832e-07, 7.124889034315629e-08,
        3.022680217235063e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.8975454187876039,
        0.04536294525197537, 0.028225832601229017,
        0.015749196451410374, 0.007818750011338693,
        0.0034207031299606783, 0.0013031250018897822,
        0.00042568750061722227,
        0.00011685539232642039, 2.621755597065345e-05,
        4.616928095502182e-06, 5.984906790157396e-07,
        5.078102727207323e-08, 2.115876140962314e-09,
        0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.9111543023631965,
        0.04022181145675141, 0.024482841756283458,
        0.013370062519388881, 0.006499335946925311,
        0.0027854296915393872, 0.0010398937515080364,
        0.0003330378681299928, 8.96640414196348e-05,
        1.9737367608074763e-05, 3.411396870545147e-06,
        4.341777835037419e-07, 3.6181481921637726e-08,
        1.4811133430825407e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.9232208458002219,
        0.035500120546611, 0.02114900798521513,
        0.011308844547649799, 0.005385164070309534,
        0.002261768909530004, 0.0008278369864945789,
        0.0002600257201169631, 6.868603927612238e-05,
        1.4839576386815878e-05,
        2.5182311443883165e-06,
        3.1477889306241735e-07, 2.577137137027563e-08,
        1.0367793290555483e-09, 0.0, 0.0, 0.0, 0.0,
        0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.9338708819642052,
        0.031194786778192207, 0.018196958953945574,
        0.009531740404447708, 0.0044481455220757304,
        0.0018315893326192878, 0.0006574936065812942,
        0.0002026238158647775, 5.253210040934153e-05,
        1.1143172814032098e-05,
        1.8571954689683423e-06, 2.280766365769793e-07,
        1.8350993724602915e-08, 7.257455747478048e-10,
        0.0, 0.0, 0.0, 0.0, 0.0],
        ])
    # Compare each timestep's deltas against the baseline row.
    for i in range(len(delta_vals)):
        deltas = delta_vals[i]
        validation_deltas = validation_array[i]
        np.testing.assert_allclose(deltas, validation_deltas, atol=1e-08,
                                   err_msg="mismatch on timestep {}".format(i))
| 72.255428
| 80
| 0.352128
| 7,468
| 56,576
| 2.663765
| 0.108731
| 0.572262
| 0.849193
| 1.119992
| 0.292967
| 0.292967
| 0.292967
| 0.292967
| 0.292967
| 0.292967
| 0
| 0.694022
| 0.513893
| 56,576
| 783
| 81
| 72.255428
| 0.029307
| 0.000689
| 0
| 0.406494
| 0
| 0
| 0.000761
| 0
| 0
| 0
| 0
| 0
| 0.001299
| 1
| 0.001299
| false
| 0
| 0.002597
| 0
| 0.003896
| 0.003896
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f1d2166206051864985cc1f8d2162c4a056737f
| 13,796
|
py
|
Python
|
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import functools
from flask import Flask, session
from flask import redirect
from flask import request, make_response
from flask import render_template
from flask import url_for
from flask_bootstrap import Bootstrap
# Database handling (provides MyDefSQL used by the views below).
from db import *
# json
import json
# Create the Flask application object.
app = Flask(__name__, instance_relative_config=True)
bootstrap = Bootstrap(app)
# NOTE(review): hardcoded session-signing key — fine for a lab exercise,
# but it should come from configuration in real deployments.
app.secret_key = 'lab3'
# 对app执行请求页面地址到函数的绑定
@app.route("/", methods=("GET", "POST"))
@app.route("/login", methods=("GET", "POST"))
def login():
    """Authenticate against the database and stash credentials in the session.

    GET renders the login form; POST attempts a database login with the
    submitted credentials and redirects to the home page on success.
    """
    if request.method != "POST":
        # Plain GET: just show the login form.
        return render_template("login.html")
    # POST from the login form: try to open a database connection.
    username = request.form["username"]
    password = request.form["password"]
    ipaddr = request.form["ipaddr"]
    database = request.form["database"]
    db = MyDefSQL(username, password, ipaddr, database)
    err = db.login()
    if err != '0':
        # Login failed; show the error string returned by the db layer.
        return render_template("login_fail.html", err=err)
    # NOTE(review): the plaintext password is kept in the session so later
    # views can reconnect; consider a safer connection-handling scheme.
    session['username'] = username
    session['password'] = password
    session['ipaddr'] = ipaddr
    session['database'] = database
    return redirect(url_for('home'))
# 主页面
@app.route("/home", methods=["GET", "POST"])
def home():
    """Render the main landing page."""
    return render_template("home.html")
# 请求url为host/table的页面返回结果
@app.route("/table", methods=["GET", "POST"])
def table():
    """Show row counts for every table in the current database.

    Requires a logged-in session, otherwise redirects to the login page.
    POSTing the 'clear' form renders an empty listing; any other POST or a
    plain GET renders the full listing.
    """
    # For simplicity every request opens a fresh database connection; a
    # request/app context could cache the connection instead.
    if 'username' not in session:
        return redirect(url_for('login'))
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.showtablecnt()
    if request.method == "POST" and 'clear' in request.form:
        # 'clear' button: render an empty listing.
        return render_template("table.html", rows='', dbname=session['database'])
    # BUG FIX: the original could fall through and return None (Flask error
    # "view function did not return a valid response"); GET and all other
    # POST branches now render the full listing.
    return render_template("table.html", rows=tabs, dbname=session['database'])
# 客户管理页面
@app.route("/customer", methods=["GET", "POST"])
def customer():
    """Customer management page: list, search, insert, update and delete.

    GET renders the full customer listing.  A POST from the search form
    filters the listing; other POSTs are Ajax JSON requests carrying a
    'function' ("delete" / "insert" / "update") plus 'inputdata' rows,
    answered with a JSON status object.
    """
    if 'username' not in session:
        return redirect(url_for('login'))
    # Every request opens a fresh connection for simplicity.
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.showcustomer()
    if tabs is None:  # idiom fix: 'is None' instead of '== None'
        tabs = ""
    if request.method == "POST":
        if 'search' in request.form:
            # Search form: keep only the filled-in fields (keys/values are
            # unicode; db.py normalises the encoding).
            searchinfo = {key: value for key, value in request.form.items()
                          if len(value) != 0 and key != 'search'}
            tabs = db.customer_search(searchinfo)
            return render_template("customer.html", rows=tabs,
                                   dbname=session['database'])
        # Remaining POSTs arrive from Ajax as JSON.
        datas = json.loads(request.get_data(as_text=True))
        function = datas["function"]
        datas = datas["inputdata"]
        if function == "delete":
            res = {'info': '删除成功!', 'errs': []}
            for data in datas:
                err = db.customer_del(data)
                if err != '0':
                    res['errs'].append([data[u"客户身份证号"], err])
            if len(res['errs']) != 0:
                res['info'] = "删除失败!"
            return json.dumps(res)
        elif function == "insert":
            res = {'info': '插入成功!', 'errs': []}
            for data in datas:
                err = db.customer_insert(data)
                if err != '0':
                    res['errs'].append([data[u"客户身份证号"], err])
            if len(res['errs']) != 0:
                res['info'] = "插入失败!"
            return json.dumps(res)
        elif function == "update":
            res = {'info': '修改成功!', 'errs': []}
            for data in datas:
                err = db.customer_update(data)
                if err != '0':
                    res['errs'].append([data[u"客户身份证号"], err])
            if len(res['errs']) != 0:
                res['info'] = "修改失败!"
            return json.dumps(res)
    # GET requests and unrecognised Ajax functions render the full listing
    # (the original could fall through and return None on one of these paths).
    return render_template("customer.html", rows=tabs, dbname=session['database'])
# 账户管理页面
# 储蓄账户
@app.route("/account/saving", methods=["GET", "POST"])
def saving():
    """Savings-account management page: list, search, insert, update, delete.

    GET renders the full listing.  A POST from the search form filters the
    listing; other POSTs are Ajax JSON requests carrying a 'function'
    ("delete" / "insert" / "update") plus 'inputdata' rows, answered with a
    JSON status object.  The True flag passed to the db helpers selects the
    savings-account variant.
    """
    if 'username' not in session:
        return redirect(url_for('login'))
    # Every request opens a fresh connection for simplicity.
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.showaccount(True)
    if tabs is None:  # idiom fix: 'is None' instead of '== None'
        tabs = ""
    if request.method == "POST":
        if 'search' in request.form:
            # Search form: keep only the filled-in fields (keys/values are
            # unicode; db.py normalises the encoding).
            searchinfo = {key: value for key, value in request.form.items()
                          if len(value) != 0 and key != 'search'}
            tabs = db.account_search(searchinfo, True)
            return render_template("account_saving.html", rows=tabs,
                                   dbname=session['database'])
        # Remaining POSTs arrive from Ajax as JSON.
        datas = json.loads(request.get_data(as_text=True))
        function = datas["function"]
        datas = datas["inputdata"]
        if function == "delete":
            res = {'info': '删除成功!', 'errs': []}
            for data in datas:
                err = db.account_del(data, True)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "删除失败!"
            return json.dumps(res)
        elif function == "insert":
            res = {'info': '插入成功!', 'errs': []}
            for data in datas:
                err = db.account_insert(data, True)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "插入失败!"
            return json.dumps(res)
        elif function == "update":
            res = {'info': '修改成功!', 'errs': []}
            for data in datas:
                err = db.account_update(data, True)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "修改失败!"
            return json.dumps(res)
    # GET requests and unrecognised Ajax functions render the full listing.
    return render_template("account_saving.html", rows=tabs,
                           dbname=session['database'])
# 支票账户
@app.route("/account/checking", methods=["GET", "POST"])
def checking():
    """Checking-account management page: list, search, insert, update, delete.

    GET renders the full listing.  A POST from the search form filters the
    listing; other POSTs are Ajax JSON requests carrying a 'function'
    ("delete" / "insert" / "update") plus 'inputdata' rows, answered with a
    JSON status object.  The False flag passed to the db helpers selects the
    checking-account variant.
    """
    if 'username' not in session:
        return redirect(url_for('login'))
    # Every request opens a fresh connection for simplicity.
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.showaccount(False)
    if tabs is None:  # idiom fix: 'is None' instead of '== None'
        tabs = ""
    if request.method == "POST":
        if 'search' in request.form:
            # Search form: keep only the filled-in fields (keys/values are
            # unicode; db.py normalises the encoding).
            searchinfo = {key: value for key, value in request.form.items()
                          if len(value) != 0 and key != 'search'}
            tabs = db.account_search(searchinfo, False)
            return render_template("account_checking.html", rows=tabs,
                                   dbname=session['database'])
        # Remaining POSTs arrive from Ajax as JSON.
        datas = json.loads(request.get_data(as_text=True))
        function = datas["function"]
        datas = datas["inputdata"]
        if function == "delete":
            res = {'info': '删除成功!', 'errs': []}
            for data in datas:
                err = db.account_del(data, False)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "删除失败!"
            return json.dumps(res)
        elif function == "insert":
            res = {'info': '插入成功!', 'errs': []}
            for data in datas:
                err = db.account_insert(data, False)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "插入失败!"
            return json.dumps(res)
        elif function == "update":
            res = {'info': '修改成功!', 'errs': []}
            for data in datas:
                err = db.account_update(data, False)
                if err != '0':
                    res['errs'].append([data[u"账户.账户号"], err])
            if len(res['errs']) != 0:
                res['info'] = "修改失败!"
            return json.dumps(res)
    # GET requests and unrecognised Ajax functions render the full listing.
    return render_template("account_checking.html", rows=tabs,
                           dbname=session['database'])
# 贷款管理页面
@app.route("/loan", methods=["GET", "POST"])
def loan():
    """Loan management page: list, search, insert, delete and release.

    GET renders the full listing.  A POST from the search form filters the
    listing; other POSTs are Ajax JSON requests carrying a 'function'
    ("delete" / "insert" / "release") plus 'inputdata' rows, answered with
    a JSON status object.
    """
    if 'username' not in session:
        return redirect(url_for('login'))
    # Every request opens a fresh connection for simplicity.
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.showloan()
    if tabs is None:  # idiom fix: 'is None' instead of '== None'
        tabs = ""
    if request.method == "POST":
        if 'search' in request.form:
            # Search form: keep only the filled-in fields (keys/values are
            # unicode; db.py normalises the encoding).
            searchinfo = {key: value for key, value in request.form.items()
                          if len(value) != 0 and key != 'search'}
            tabs = db.loan_search(searchinfo)
            return render_template("loan.html", rows=tabs,
                                   dbname=session['database'])
        # Remaining POSTs arrive from Ajax as JSON.
        datas = json.loads(request.get_data(as_text=True))
        function = datas["function"]
        datas = datas["inputdata"]
        if function == "delete":
            res = {'info': '删除成功!', 'errs': []}
            for data in datas:
                err = db.loan_del(data)
                if err != '0':
                    res['errs'].append([data[u"贷款号"], err])
            if len(res['errs']) != 0:
                res['info'] = "删除失败!"
            return json.dumps(res)
        elif function == "insert":
            res = {'info': '插入成功!', 'errs': []}
            for data in datas:
                err = db.loan_insert(data)
                if err != '0':
                    res['errs'].append([data[u"贷款号"], err])
            if len(res['errs']) != 0:
                res['info'] = "插入失败!"
            return json.dumps(res)
        elif function == "release":
            res = {'info': '贷款发放成功!', 'errs': []}
            for data in datas:
                err = db.loan_release(data)
                if err != '0':
                    res['errs'].append([data[u"贷款号"], err])
            if len(res['errs']) != 0:
                res['info'] = "贷款发放失败!"
            return json.dumps(res)
    # GET requests and unrecognised Ajax functions render the full listing.
    return render_template("loan.html", rows=tabs, dbname=session['database'])
# 业务统计
# 按月
@app.route("/statistic/month")
def month():
    """Business statistics aggregated by month."""
    if 'username' not in session:
        return redirect(url_for('login'))
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.statistic_month()
    return render_template("statistic.html", how=u'月份', rows=tabs,
                           dbname=session['database'])
# 按季度
@app.route("/statistic/quarter")
def quarter():
    """Business statistics aggregated by quarter."""
    if 'username' not in session:
        return redirect(url_for('login'))
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.statistic_quarter()
    return render_template("statistic.html", how=u'季度', rows=tabs,
                           dbname=session['database'])
# 按年
@app.route("/statistic/year")
def year():
    """Business statistics aggregated by year."""
    if 'username' not in session:
        return redirect(url_for('login'))
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    tabs = db.statistic_year()
    return render_template("statistic.html", how=u'年份', rows=tabs,
                           dbname=session['database'])
# 测试新html页面
@app.route("/test")
def test():
    """Scratch endpoint for trying out a new HTML template."""
    if 'username' not in session:
        return redirect(url_for('login'))
    db = MyDefSQL(session['username'], session['password'],
                  session['ipaddr'], session['database'])
    db.login()
    return render_template("test.html", rows=db.showtablecnt())
# 测试URL下返回html page
@app.route("/hello")
def hello():
    """Smoke-test endpoint returning a plain-text greeting."""
    return "hello world!"
#返回不存在页面的处理
@app.errorhandler(404)
def not_found(e):
    """Render the custom 404 page for unknown URLs."""
    return render_template("404.html")
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network -- disable debug for any
    # non-local deployment.
    app.run(host = "0.0.0.0", debug=True)
| 34.318408
| 101
| 0.508771
| 1,424
| 13,796
| 4.872191
| 0.116573
| 0.051888
| 0.054771
| 0.039349
| 0.75382
| 0.733641
| 0.733641
| 0.717642
| 0.705967
| 0.697463
| 0
| 0.004628
| 0.342201
| 13,796
| 402
| 102
| 34.318408
| 0.75989
| 0.06183
| 0
| 0.706271
| 0
| 0
| 0.135356
| 0.003362
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042904
| false
| 0.039604
| 0.029703
| 0.009901
| 0.211221
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f3831fca3eb8519b2004ca6b866229be692631e
| 91
|
py
|
Python
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 4
|
2020-05-13T19:34:27.000Z
|
2021-09-20T09:01:10.000Z
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | null | null | null |
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 2
|
2019-09-14T14:45:09.000Z
|
2020-11-22T01:46:59.000Z
|
from linePathSegment import LinePathSegment
from lineSegmentFinder import LineSegmentFinder
| 45.5
| 47
| 0.923077
| 8
| 91
| 10.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 91
| 2
| 47
| 45.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
450a8b0c8c6133dd03a986ca11b5d16bc7850c24
| 9,945
|
py
|
Python
|
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numpy.testing import assert_allclose, run_module_suite
from fast_ndimage import (
median_filter, sobel, convolve, correlate, gaussian_filter,
gaussian_filter1d, uniform_filter, uniform_filter1d)
def test_median_filter():
    """Median filter: the ndimage and opencv backends must agree."""
    rtol = atol = 1e-7
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64)).astype(np.float32)
    for mode in ('reflect',):
        opts = dict(mode=mode, size=3)
        via_ndi = median_filter(image, backend='ndimage', **opts)
        via_cv = median_filter(image, backend='opencv', **opts)
        assert_allclose(via_ndi, via_cv, rtol=rtol, atol=atol)
def test_sobel_filter():
    """Sobel: backends must agree across axes, border modes, scale, delta."""
    rtol = atol = 1e-7
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64))
    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected in filter
    # init), so that mode is not exercised here.
    for axis in (0, 1):
        for mode in ('reflect', 'mirror', 'constant', 'nearest'):
            opts = dict(mode=mode, axis=axis)
            assert_allclose(sobel(image, backend='ndimage', **opts),
                            sobel(image, backend='opencv', **opts),
                            rtol=rtol, atol=atol)
    # 1-D input: sweep scale and delta with fixed axis/mode.
    column = image[:, 0]
    for scale in (0.5, 1, 2, None):
        for delta in (0, 0.5, 2):
            opts = dict(mode='reflect', axis=0, scale=scale, delta=delta)
            assert_allclose(sobel(column, backend='ndimage', **opts),
                            sobel(column, backend='opencv', **opts),
                            rtol=rtol, atol=atol)
def test_uniform_filter():
    """uniform_filter: backends must agree for modes, options and origins."""
    rtol = atol = 1e-7
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64))

    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected), skip it.
    def _check(**opts):
        assert_allclose(uniform_filter(image, backend='ndimage', **opts),
                        uniform_filter(image, backend='opencv', **opts),
                        rtol=rtol, atol=atol)

    for mode in ('reflect', 'mirror', 'constant', 'nearest'):
        _check(mode=mode, size=(2, 3))
    for squared in (False, True):
        for normalize in (False, True):
            _check(size=3, mode='reflect', normalize=normalize,
                   squared=squared)
    for size in (5, (5, 6), (6, 5), 6):
        for origin in (-2, -1, 0, 1, 2, (0, 0), (1, 1), (0, 1), (2, 1),
                       (-1, -2)):
            _check(mode='reflect', size=size, origin=origin)
def test_uniform_filter1d():
    """uniform_filter1d: the ndimage and opencv backends must agree."""
    rtol = atol = 1e-7
    shape = (63, 64)
    rstate = np.random.RandomState(0)
    x = rstate.standard_normal(shape)
    size = 3
    for axis in [0, 1, -1]:
        for mode in ['reflect', 'mirror', 'constant', 'nearest']:
            kwargs = dict(mode=mode)
            result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
            result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
            assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
    # NOTE(review): the two sweeps below reuse `axis` left over from the
    # loop above (final value -1) -- presumably intentional sampling, but
    # confirm they should not run for every axis.
    for squared in [False, True]:
        for normalize in [False, True]:
            kwargs = dict(mode='reflect', normalize=normalize,
                          squared=squared)
            result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
            result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
            assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
    for origin in [-1, 0, 1]:
        kwargs = dict(mode='reflect', origin=origin)
        result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
        result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
        assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_gaussian_filter():
    """gaussian_filter: backends must agree across modes and truncate values."""
    rtol = atol = 1e-12
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64))
    sigma = (1.5, 3)
    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected), skip it.
    for mode in ('reflect', 'mirror', 'constant', 'nearest'):
        assert_allclose(
            gaussian_filter(image, sigma, backend='ndimage', mode=mode),
            gaussian_filter(image, sigma, backend='opencv', mode=mode),
            rtol=rtol, atol=atol)
    for truncate in (1, 1.1, 1.5, 2, 4, 5):
        assert_allclose(
            gaussian_filter(image, sigma, backend='ndimage',
                            mode='reflect', truncate=truncate),
            gaussian_filter(image, sigma, backend='opencv',
                            mode='reflect', truncate=truncate),
            rtol=rtol, atol=atol)
def test_gaussian_filter1d():
    """gaussian_filter1d: the ndimage and opencv backends must agree."""
    rtol = atol = 1e-12
    shape = (63, 64)
    sigma = 2.5
    rstate = np.random.RandomState(0)
    x = rstate.standard_normal(shape)
    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected in filter
    # init), so that mode is not exercised here.
    for axis in [0, 1, -1]:
        for mode in ['reflect', 'mirror', 'constant', 'nearest']:
            kwargs = dict(mode=mode)
            result_ndi = gaussian_filter1d(x, sigma, axis, backend='ndimage',
                                           **kwargs)
            result_opencv = gaussian_filter1d(x, sigma, axis, backend='opencv',
                                              **kwargs)
            assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
    mode = 'reflect'
    # NOTE(review): `axis` below is the leftover value (-1) from the loop
    # above, passed via kwargs -- presumably intentional, but confirm this
    # sweep should not repeat for every axis.
    for truncate in [1, 2]:
        kwargs = dict(mode=mode, truncate=truncate, axis=axis)
        result_ndi = gaussian_filter1d(x, sigma, backend='ndimage',
                                       **kwargs)
        result_opencv = gaussian_filter1d(x, sigma, backend='opencv',
                                          **kwargs)
        assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_convolve():
    """convolve: backends must agree for border modes, delta and origin."""
    rtol = atol = 1e-12
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64))
    kernel = rng.standard_normal((3, 6))

    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected), skip it.
    def _check(**opts):
        assert_allclose(convolve(image, kernel, backend='ndimage', **opts),
                        convolve(image, kernel, backend='opencv', **opts),
                        rtol=rtol, atol=atol)

    for mode in ('reflect', 'mirror', 'constant', 'nearest'):
        _check(mode=mode)
    for delta in (0, -0.5, 2):
        _check(mode='reflect', delta=delta)
    for origin in (-1, 0, 1, (0, 0), (1, 1)):
        _check(mode='reflect', origin=origin)
    # TODO: test threading
def test_correlate():
    """correlate: backends must agree for border modes, delta and origin."""
    rtol = atol = 1e-12
    rng = np.random.RandomState(0)
    image = rng.standard_normal((63, 64))
    kernel = rng.standard_normal((4, 4))

    # OpenCV 3.3 crashes on mode 'wrap' (BORDER_WRAP rejected), skip it.
    def _check(**opts):
        assert_allclose(correlate(image, kernel, backend='ndimage', **opts),
                        correlate(image, kernel, backend='opencv', **opts),
                        rtol=rtol, atol=atol)

    for mode in ('reflect', 'mirror', 'constant', 'nearest'):
        _check(mode=mode)
    for delta in (0, -0.5, 2):
        _check(mode='reflect', delta=delta)
    for origin in (-1, 0, 1, (0, 0), (1, 1)):
        _check(mode='reflect', origin=origin)
    # TODO: assert_raises ValueError on origin=(-1, 1) etc.
if __name__ == "__main__":
    # Run every test_* function in this module via numpy's test runner.
    run_module_suite()
| 44.2
| 173
| 0.627954
| 1,275
| 9,945
| 4.762353
| 0.079216
| 0.056324
| 0.062582
| 0.081357
| 0.908267
| 0.898057
| 0.876318
| 0.857543
| 0.847991
| 0.811594
| 0
| 0.041738
| 0.2363
| 9,945
| 224
| 174
| 44.397321
| 0.757735
| 0.140774
| 0
| 0.682635
| 0
| 0
| 0.06272
| 0
| 0
| 0
| 0
| 0.004464
| 0.11976
| 1
| 0.047904
| false
| 0
| 0.017964
| 0
| 0.065868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
451f9b7ff4174b43f88b83397cc76cc631f10347
| 148
|
py
|
Python
|
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
class Handler(tornado.web.RequestHandler):
    """Tornado request handler with no per-request setup yet."""

    def initialize(self):
        """Hook for per-request initialization; currently a no-op."""
        pass
| 18.5
| 42
| 0.662162
| 19
| 148
| 5.157895
| 0.894737
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008264
| 0.182432
| 148
| 8
| 43
| 18.5
| 0.801653
| 0.283784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
18bcc995a7294c17a7102d9ddff9a88a24d958f1
| 27
|
py
|
Python
|
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
from .bot import ItsnpBot
| 13.5
| 26
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 1
| 27
| 27
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18d163664110bd63d5393ef2d5efd9b345f52613
| 38
|
py
|
Python
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 1
|
2018-09-06T00:54:49.000Z
|
2018-09-06T00:54:49.000Z
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 28
|
2018-08-25T03:54:30.000Z
|
2018-10-14T12:09:47.000Z
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | null | null | null |
from researchutils.task import plotter
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e17db1fd4e96affffe66942426ac284e73e8b345
| 10,463
|
py
|
Python
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 8
|
2018-07-04T09:54:46.000Z
|
2022-03-17T08:21:06.000Z
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 19
|
2018-04-18T07:24:55.000Z
|
2022-03-04T01:03:15.000Z
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 7
|
2018-07-03T12:17:50.000Z
|
2021-05-05T04:33:32.000Z
|
from restapi.connectors import Connector
from restapi.env import Env
from restapi.services.authentication import BaseAuthentication, Role
from restapi.tests import API_URI, BaseTests, FlaskClient
from restapi.utilities.logs import log
class TestApp(BaseTests):
def test_no_auth(self, client: FlaskClient) -> None:
r = client.get(f"{API_URI}/tests/noauth")
assert r.status_code == 200
assert self.get_content(r) == "OK"
if Env.get_bool("AUTH_ENABLE"):
headers, _ = self.do_login(client, None, None)
# Tokens are ignored
r = client.get(f"{API_URI}/tests/noauth", headers=headers)
assert r.status_code == 200
assert self.get_content(r) == "OK"
# Tokens are ignored even if invalid
r = client.get(
f"{API_URI}/tests/noauth", headers={"Authorization": "Bearer invalid"}
)
assert r.status_code == 200
assert self.get_content(r) == "OK"
def test_auth(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/authentication")
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/authentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
headers, token = self.do_login(client, None, None)
r = client.get(f"{API_URI}/tests/authentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
if not Env.get_bool("ALLOW_ACCESS_TOKEN_PARAMETER"):
# access token parameter is not allowed by default
r = client.get(
f"{API_URI}/tests/authentication", query_string={"access_token": token}
)
assert r.status_code == 401
def test_optional_auth(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
# Optional authentication can accept missing tokens
r = client.get(f"{API_URI}/tests/optionalauthentication")
assert r.status_code == 204
headers, token = self.do_login(client, None, None)
# Or valid tokens
r = client.get(f"{API_URI}/tests/optionalauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
# But not invalid tokens, i.e. if presented the tokens is always validated
r = client.get(
f"{API_URI}/tests/authentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
if not Env.get_bool("ALLOW_ACCESS_TOKEN_PARAMETER"):
# access token parameter is not allowed by default
r = client.get(
f"{API_URI}/tests/optionalauthentication",
query_string={"access_token": token},
)
# query token is ignored but the endpoint accepts missing tokens
assert r.status_code == 204
r = client.get(
f"{API_URI}/tests/optionalauthentication",
query_string={"access_token": "invalid"},
)
# invalid tokens should be rejected, but query token is ignored
assert r.status_code == 204
def test_access_token_parameter(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/queryauthentication")
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/queryauthentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
headers, token = self.do_login(client, None, None)
r = client.get(f"{API_URI}/tests/queryauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/queryauthentication", query_string={"access_token": token}
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/queryauthentication",
query_string={"access_token": "invalid"},
)
assert r.status_code == 401
def test_optional_access_token_parameter(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
# Optional authentication can accept missing tokens
r = client.get(f"{API_URI}/tests/optionalqueryauthentication")
assert r.status_code == 204
headers, token = self.do_login(client, None, None)
# Or valid tokens
r = client.get(f"{API_URI}/tests/optionalqueryauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
# But not invalid tokens, i.e. if presented the tokens is always validated
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
query_string={"access_token": token},
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
query_string={"access_token": "invalid"},
)
# invalid tokens should be rejected, but query token is ignored
assert r.status_code == 401
def test_authentication_with_multiple_roles(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/manyrolesauthentication")
assert r.status_code == 401
r = client.get(f"{API_URI}/tests/unknownroleauthentication")
assert r.status_code == 401
admin_headers, _ = self.do_login(client, None, None)
r = client.get(
f"{API_URI}/tests/manyrolesauthentication", headers=admin_headers
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/unknownroleauthentication", headers=admin_headers
)
assert r.status_code == 401
if Env.get_bool("MAIN_LOGIN_ENABLE"):
uuid, data = self.create_user(client, roles=[Role.USER])
user_header, _ = self.do_login(
client, data.get("email"), data.get("password")
)
r = client.get(
f"{API_URI}/tests/manyrolesauthentication", headers=user_header
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == data.get("email")
r = client.get(
f"{API_URI}/tests/unknownroleauthentication", headers=user_header
)
assert r.status_code == 401
self.delete_user(client, uuid)
def test_authentication_with_auth_callback(self, client: FlaskClient) -> None:
    """Exercise an endpoint guarded by a preload (auth) callback.

    The callback accepts the default user's uuid and rejects an unknown
    uuid with 401, for both the normal response and the get_schema view.
    """
    if not Env.get_bool("AUTH_ENABLE"):
        log.warning("Skipping authentication tests")
        return

    auth = Connector.get_authentication_instance()
    user = auth.get_user(username=BaseAuthentication.default_user)
    assert user is not None

    valid_path = f"/tests/preloadcallback/{user.uuid}"
    invalid_path = "/tests/preloadcallback/12345678-90ab-cdef-1234-567890abcdef"
    admin_auth, _ = self.do_login(client, None, None)

    # Verify both endpoint ...
    resp = client.get(
        f"{API_URI}{valid_path}", query_string={"test": True}, headers=admin_auth
    )
    assert resp.status_code == 200
    payload = self.get_content(resp)
    assert isinstance(payload, dict)
    assert len(payload) == 1
    assert "email" in payload
    assert payload["email"] == user.email

    resp = client.get(
        f"{API_URI}{invalid_path}", query_string={"test": True}, headers=admin_auth
    )
    assert resp.status_code == 401

    # and get_schema!
    resp = client.get(
        f"{API_URI}{valid_path}",
        query_string={"get_schema": True},
        headers=admin_auth,
    )
    assert resp.status_code == 200
    payload = self.get_content(resp)
    assert isinstance(payload, list)
    assert len(payload) == 1
    assert payload[0]["key"] == "test"
    assert payload[0]["type"] == "boolean"

    resp = client.get(
        f"{API_URI}{invalid_path}",
        query_string={"get_schema": True},
        headers=admin_auth,
    )
    assert resp.status_code == 401
| 35.228956
| 88
| 0.60346
| 1,189
| 10,463
| 5.158957
| 0.105971
| 0.032279
| 0.052168
| 0.057385
| 0.862406
| 0.843006
| 0.841539
| 0.825888
| 0.7791
| 0.684056
| 0
| 0.017253
| 0.29093
| 10,463
| 296
| 89
| 35.347973
| 0.809543
| 0.062793
| 0
| 0.666667
| 0
| 0
| 0.192339
| 0.11716
| 0
| 0
| 0
| 0
| 0.342342
| 1
| 0.031532
| false
| 0.004505
| 0.022523
| 0
| 0.085586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bedcd44ac29b275e927dc09d0e22f32d04f7138a
| 59
|
py
|
Python
|
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | 6
|
2021-04-06T18:14:59.000Z
|
2021-07-18T03:26:03.000Z
|
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | null | null | null |
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | null | null | null |
from .api import HeapAPI as Heap
from .api import HeapType
| 19.666667
| 32
| 0.79661
| 10
| 59
| 4.7
| 0.7
| 0.297872
| 0.553191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 59
| 2
| 33
| 29.5
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
beee49868a956aa3196803cdf539676b921996ae
| 11,496
|
py
|
Python
|
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import webob
from senlin.api.common import version_request as vr
from senlin.api.common import wsgi
from senlin.api.middleware import version_negotiation as vn
from senlin.common import exception
from senlin.tests.unit.common import base
@mock.patch("senlin.api.openstack.versions.Controller")
class VersionNegotiationTest(base.SenlinTestCase):
    """Unit tests for the version-negotiation WSGI middleware.

    The class-level patch replaces the openstack versions Controller, so
    every test method receives the patched class as ``mock_vc``.
    """

    def test_get_version_controller(self, mock_vc):
        """A 'v1.0' token resolves a controller and sets api.major/minor."""
        gvc = mock_vc.return_value
        xvc = mock.Mock()
        gvc.get_controller = mock.Mock(return_value=xvc)
        vnf = vn.VersionNegotiationFilter(None, None)
        request = webob.Request({})
        res = vnf._get_controller('v1.0', request)
        self.assertEqual(xvc, res)
        self.assertEqual(1, request.environ['api.major'])
        self.assertEqual(0, request.environ['api.minor'])
        gvc.get_controller.assert_called_once_with('1.0')

    def test_get_version_controller_shorter_version(self, mock_vc):
        """A bare 'v1' token is normalized and looked up as '1.0'."""
        gvc = mock_vc.return_value
        xvc = mock.Mock()
        gvc.get_controller = mock.Mock(return_value=xvc)
        vnf = vn.VersionNegotiationFilter(None, None)
        request = webob.Request({})
        res = vnf._get_controller('v1', request)
        self.assertEqual(xvc, res)
        self.assertEqual(1, request.environ['api.major'])
        self.assertEqual(0, request.environ['api.minor'])
        gvc.get_controller.assert_called_once_with('1.0')

    def test_get_controller_not_match_version(self, mock_vc):
        """A non-version token returns None without querying the map."""
        gvc = mock_vc.return_value
        gvc.get_controller = mock.Mock(return_value=None)
        vnf = vn.VersionNegotiationFilter(None, None)
        request = webob.Request({})
        res = vnf._get_controller("invalid", request)
        self.assertIsNone(res)
        # get_controller must not even be consulted for a bad token.
        self.assertEqual(0, gvc.get_controller.call_count)

    def test_request_path_is_version(self, mock_vc):
        """A literal 'versions' path is answered by the versions app."""
        vnf = vn.VersionNegotiationFilter(None, None)
        request = webob.Request({'PATH_INFO': 'versions'})
        response = vnf.process_request(request)
        self.assertIs(mock_vc.return_value, response)

    def test_request_path_is_empty(self, mock_vc):
        """A bare '/' path is answered by the versions app."""
        vnf = vn.VersionNegotiationFilter(None, None)
        request = webob.Request({'PATH_INFO': '/'})
        response = vnf.process_request(request)
        self.assertIs(mock_vc.return_value, response)

    def test_request_path_contains_valid_version(self, mock_vc):
        """'v1.0/resource' resolves a controller and validates the version."""
        vnf = vn.VersionNegotiationFilter(None, None)
        gvc = mock_vc.return_value
        x_controller = mock.Mock()
        gvc.get_controller = mock.Mock(return_value=x_controller)
        mock_check = self.patchobject(vnf, '_check_version_request')
        major = 1
        minor = 0
        request = webob.Request({'PATH_INFO': 'v1.0/resource'})
        response = vnf.process_request(request)
        # None means "continue down the pipeline" for a matched version.
        self.assertIsNone(response)
        self.assertEqual(major, request.environ['api.major'])
        self.assertEqual(minor, request.environ['api.minor'])
        gvc.get_controller.assert_called_once_with('1.0')
        mock_check.assert_called_once_with(request, x_controller)

    def test_removes_version_from_request_path(self, mock_vc):
        """The version prefix is stripped from PATH_INFO."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        expected_path = 'resource'
        request = webob.Request({'PATH_INFO': 'v1.0/%s' % expected_path})
        response = vnf.process_request(request)
        self.assertIsNone(response)
        self.assertEqual(expected_path, request.path_info_peek())

    def test_simple_version_on_request_path(self, mock_vc):
        """A path of just 'v1' returns the version controller's response."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        fake_vc = mock.Mock(return_value={'foo': 'bar'})
        self.patchobject(vnf.versions_app, 'get_controller',
                         return_value=fake_vc)
        request = webob.Request({'PATH_INFO': 'v1'})
        response = vnf.process_request(request)
        self.assertEqual({'foo': 'bar'}, response)

    def test_full_version_on_request_path(self, mock_vc):
        """A path of just 'v1.0' returns the version controller's response."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        fake_vc = mock.Mock(return_value={'foo': 'bar'})
        self.patchobject(vnf.versions_app, 'get_controller',
                         return_value=fake_vc)
        request = webob.Request({'PATH_INFO': 'v1.0'})
        response = vnf.process_request(request)
        self.assertEqual({'foo': 'bar'}, response)

    def test_request_path_contains_unknown_version(self, mock_vc):
        """An unmapped 'v2.0' path falls back to the versions app."""
        vnf = vn.VersionNegotiationFilter(None, None)
        gvc = mock_vc.return_value
        gvc.get_controller = mock.Mock(return_value=None)
        self.patchobject(vnf, '_check_version_request')
        request = webob.Request({'PATH_INFO': 'v2.0/resource'})
        request.headers['Accept'] = '*/*'
        response = vnf.process_request(request)
        self.assertIs(mock_vc.return_value, response)

    def test_accept_header_contains_valid_version(self, mock_vc):
        """With no version in the path, the Accept header supplies it."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        major = 1
        minor = 0
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0'
        response = vnf.process_request(request)
        self.assertIsNone(response)
        self.assertEqual(major, request.environ['api.major'])
        self.assertEqual(minor, request.environ['api.minor'])

    def test_accept_header_contains_simple_version(self, mock_vc):
        """Accept-header version with an empty path hits the version app."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        fake_vc = mock.Mock(return_value={'foo': 'bar'})
        self.patchobject(vnf.versions_app, 'get_controller',
                         return_value=fake_vc)
        major = 1
        minor = 0
        request = webob.Request({'PATH_INFO': ''})
        request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0'
        response = vnf.process_request(request)
        self.assertEqual(major, request.environ['api.major'])
        self.assertEqual(minor, request.environ['api.minor'])
        self.assertEqual({'foo': 'bar'}, response)

    def test_accept_header_contains_unknown_version(self, mock_vc):
        """Unknown Accept version passes through; malformed one is a 404."""
        vnf = vn.VersionNegotiationFilter(None, None)
        self.patchobject(vnf, '_check_version_request')
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers['Accept'] = 'application/vnd.openstack.clustering-v2.0'
        response = vnf.process_request(request)
        self.assertIsNone(response)
        # A non-numeric version suffix ('vab') yields HTTPNotFound.
        request.headers['Accept'] = 'application/vnd.openstack.clustering-vab'
        response = vnf.process_request(request)
        self.assertIsInstance(response, webob.exc.HTTPNotFound)

    def test_no_URI_version_accept_with_invalid_MIME_type(self, mock_vc):
        """Invalid MIME type is a 404; empty Accept gets the versions app."""
        vnf = vn.VersionNegotiationFilter(None, None)
        gvc = mock_vc.return_value
        gvc.get_controller = mock.Mock(side_effect=[None, None])
        self.patchobject(vnf, '_check_version_request')
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers['Accept'] = 'application/invalidMIMEType'
        response = vnf.process_request(request)
        self.assertIsInstance(response, webob.exc.HTTPNotFound)
        request.headers['Accept'] = ''
        response = vnf.process_request(request)
        self.assertEqual(gvc, response)

    def test_check_version_request(self, mock_vc):
        """The 'clustering' entry of the API-version header is parsed."""
        controller = mock.Mock()
        minv = vr.APIVersionRequest('1.0')
        maxv = vr.APIVersionRequest('1.3')
        controller.min_api_version = mock.Mock(return_value=minv)
        controller.max_api_version = mock.Mock(return_value=maxv)
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers[wsgi.API_VERSION_KEY] = 'clustering 1.0,compute 2.0'
        vnf = vn.VersionNegotiationFilter(None, None)
        vnf._check_version_request(request, controller)
        self.assertIsNotNone(request.version_request)
        expected = vr.APIVersionRequest('1.0')
        self.assertEqual(expected, request.version_request)

    def test_check_version_request_default(self, mock_vc):
        """Without a 'clustering' entry the controller default is used."""
        controller = mock.Mock()
        controller.DEFAULT_API_VERSION = "1.0"
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers[wsgi.API_VERSION_KEY] = 'compute 2.0'
        vnf = vn.VersionNegotiationFilter(None, None)
        vnf._check_version_request(request, controller)
        self.assertIsNotNone(request.version_request)
        expected = vr.APIVersionRequest(controller.DEFAULT_API_VERSION)
        self.assertEqual(expected, request.version_request)

    def test_check_version_request_invalid_format(self, mock_vc):
        """A malformed version string ('2.03') raises HTTPBadRequest."""
        controller = mock.Mock()
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.03'
        vnf = vn.VersionNegotiationFilter(None, None)
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               vnf._check_version_request,
                               request, controller)
        self.assertEqual("API Version String '2.03' is of invalid format. It "
                         "must be of format 'major.minor'.",
                         six.text_type(ex))

    def test_check_version_request_invalid_version(self, mock_vc):
        """A version outside [min, max] raises InvalidGlobalAPIVersion."""
        controller = mock.Mock()
        minv = vr.APIVersionRequest('1.0')
        maxv = vr.APIVersionRequest('1.100')
        controller.min_api_version = mock.Mock(return_value=minv)
        controller.max_api_version = mock.Mock(return_value=maxv)
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.3'
        vnf = vn.VersionNegotiationFilter(None, None)
        ex = self.assertRaises(exception.InvalidGlobalAPIVersion,
                               vnf._check_version_request,
                               request, controller)
        expected = ("Version '2.3' is not supported by the API. Minimum is "
                    "'%(min_ver)s' and maximum is '%(max_ver)s'." %
                    {'min_ver': str(minv), 'max_ver': str(maxv)})
        self.assertEqual(expected, six.text_type(ex))

    def test_check_version_request_latest(self, mock_vc):
        """'Latest' resolves to the controller's max API version."""
        controller = mock.Mock()
        controller.max_api_version = mock.Mock(return_value='12.34')
        request = webob.Request({'PATH_INFO': 'resource'})
        request.headers[wsgi.API_VERSION_KEY] = 'clustering Latest'
        vnf = vn.VersionNegotiationFilter(None, None)
        vnf._check_version_request(request, controller)
        self.assertIsNotNone(request.version_request)
        expected = '12.34'
        self.assertEqual(expected, request.version_request)
| 40.336842
| 79
| 0.673626
| 1,357
| 11,496
| 5.482682
| 0.134856
| 0.022581
| 0.025538
| 0.084274
| 0.811559
| 0.785349
| 0.763844
| 0.731317
| 0.710887
| 0.66707
| 0
| 0.008547
| 0.216336
| 11,496
| 284
| 80
| 40.478873
| 0.817294
| 0.045668
| 0
| 0.63285
| 0
| 0
| 0.112602
| 0.039055
| 0
| 0
| 0
| 0
| 0.202899
| 1
| 0.091787
| false
| 0
| 0.038647
| 0
| 0.135266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bef5aaf1ff9723ae8680002976dbc5ebda4fccc9
| 37
|
py
|
Python
|
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
def test_nonop():
    """Trivial always-passing test; the assertion cannot fail."""
    expected = 1
    assert expected == 1
| 9.25
| 17
| 0.567568
| 6
| 37
| 3.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.297297
| 37
| 3
| 18
| 12.333333
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
83363c0ef913ccccece0efe1dc580e5eb1715e0d
| 239
|
py
|
Python
|
veinmind-backdoor/register.py
|
Jqqzzz/veinmind-tools
|
d7d35880efb4f5f5ad4c3f4685f5d0f4ec8e404f
|
[
"MIT"
] | 364
|
2022-02-09T07:05:00.000Z
|
2022-03-31T15:12:52.000Z
|
veinmind-backdoor/register.py
|
lionkgxu/veinmind-tools
|
415aae9da5f0e31275ecdf61a2cef088c766d381
|
[
"MIT"
] | 9
|
2022-03-03T01:02:15.000Z
|
2022-03-28T03:24:30.000Z
|
veinmind-backdoor/register.py
|
lionkgxu/veinmind-tools
|
415aae9da5f0e31275ecdf61a2cef088c766d381
|
[
"MIT"
] | 62
|
2022-02-10T09:54:15.000Z
|
2022-03-31T09:43:00.000Z
|
class register:
    """Class-level plugin registry.

    ``plugin_dict`` maps a registered name to the plugin object.
    NOTE(review): ``plugin_name`` is declared but never populated by this
    class — confirm whether callers rely on it before removing.
    """

    # Shared across the whole process: these are class attributes.
    plugin_dict = {}
    plugin_name = []

    @classmethod
    def register(cls, plugin_name):
        """Return a decorator storing the decorated object under *plugin_name*."""

        def decorator(plugin_obj):
            cls.plugin_dict[plugin_name] = plugin_obj
            return plugin_obj

        return decorator
| 23.9
| 49
| 0.598326
| 25
| 239
| 5.52
| 0.4
| 0.217391
| 0.231884
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.322176
| 239
| 10
| 50
| 23.9
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0
| 0
| 0.777778
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
83399c09776772609094ffc2ac08102d789dfc9b
| 21,383
|
py
|
Python
|
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
# #
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
# #
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# RoutineLevel4_1_TestScript Local Effects
#
# Author:
# ----------------------------------------------------------------------------
# First run setupTextEA
# Source snippet pairs (old text, new text) swapped into the LE_Test_Local
# product by the "fileChanges" entries of the scripts below.
windLE1 = """Definition["windLE_list"] = 1"""
windLE2 = """Definition["windLE_list"] = 2"""
tempLE1 = """Definition["tempLE_list"] = 1"""
tempLE2 = """Definition["tempLE_list"] = 2"""
periodLE1 = """Definition["Period_1_version"] = 1"""
periodLE2 = """Definition["Period_1_version"] = 2"""
periodLE3 = """Definition["Period_1_version"] = 3"""
tempLE_method1 = """Definition["tempLE_method"] = 1"""
tempLE_method2 = """Definition["tempLE_method"] = 2"""
# snowLE1 -> snowLE2 replaces the '##'-commented phrase lines with active ones.
snowLE1 = """## (self.weather_phrase,self._wxLocalEffects_list()),
## (self.snow_phrase,self._snowAmtLocalEffects_list()),
## (self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snowLE2 = """ (self.weather_phrase,self._wxLocalEffects_list()),
(self.snow_phrase,self._snowAmtLocalEffects_list()),
(self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snow2LE1 = """## ("Period_2_3", 12), """
snow2LE2 = """ ("Period_2_3", 12), """
# Runs LE_Test_Local for each test
# Each entry is one test case for TestScript.generalTestScript (see
# testScript below). Keys: "name", "commentary", "createGrids" (grids to
# install before the run), "checkStrings" (expected product text), and
# optionally "fileChanges" (source substitutions flagged "undo") and
# "stringOrder".
scripts = [
{
"name": "LE1",
"commentary": "Local Effects: MaxT (21,40), Wind (N30,N10), Gust 0",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 35 mph in the mountains",
],
},
{
"name": "LE2",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph increasing to around 25 mph in the afternoon",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
{
"name": "LE3",
"commentary": "Local Effects: Wind (N20,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds, except north around 25 mph in the mountains",
],
},
{
"name": "LE4",
"commentary": "Local Effects: Wind (N20,0) -> (N30,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (0, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
# LE5-LE7 additionally swap Definition snippets via "fileChanges".
{
"name": "LE5",
"commentary": "Local Effects: Wind (N20,N10), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 25 mph in the mountains, otherwise north around 10 mph",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE6",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
"In the valleys, north winds around 10 mph increasing to around 25 mph in the afternoon",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE7",
"commentary": "Local Effects: Temp (21, 40), Wind (N20,N10), Gust 0, tempLE_list=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 25 mph in the mountains",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (tempLE1, tempLE2), "undo")
],
},
# LE8-LE14 use the named edit areas area1/area2/area3 ("rush valley",
# "benches", "city" in the expected wording).
{
"name": "LE8",
"commentary": "Local Effects: MaxT (20,20,20), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE9",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE10",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 30 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE11",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 30 in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE12",
"commentary": "Local Effects: MaxT (20,40,20), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the benches, and around 40 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo")
],
},
{
"name": "LE13",
"commentary": "Local Effects: MaxT (20,40,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 40 in the rush valley and in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE14",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
# LE15-LE16: snow accumulation phrases; order of checkStrings matters.
{
"name": "LE15",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 3 inches",
".TONIGHT...", "Snow accumulation around 5 inches",
"...", "Snow accumulation around 1 inch",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE16",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 2, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 4, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 2 inches, except around 5 inches above timberline",
".TONIGHT...", "Snow accumulation around 1 inch, except around 4 inches above timberline",
"...", "Snow accumulation of 1 to 3 inches",
"Total snow accumulation around 4 inches, except around 12 inches above timberline",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE17", # Wade and Ballard
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N10)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph. In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon.",
],
},
{
"name": "LE18", # Wade and Ballard
"commentary": "Local Effects: Wind (N10,N20) -> (N10,N30)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
# "North winds around 25 mph increasing to around 35 mph in the afternoon. North winds around 10 mph in the mountains.",
"North winds around 25 mph increasing to around 35 mph in the afternoon. In the mountains, north winds around 10 mph.",
],
},
# LE19-LE24: non-intersecting areas with sub-phrase consolidation cases.
{
"name": "LE19",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, patchy fog in the rush valley, a 50 percent chance of snow showers in the benches, patchy fog in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE20",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE21",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "Chc:T:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the city, a 50 percent chance of thunderstorms.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE22",
"commentary": "Local Effects for non-intersecting areas -- CASE 2 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches, chance of showers in the rush valley, chance of snow showers in the benches.",
"Patchy fog.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE23",
"commentary": "Local Effects for non-intersecting areas",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE24",
"commentary": "Local Effects for non-intersecting areas -- no consolidation necessary",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the city and in the rush valley, a 50 percent chance of snow showers in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
]
import CreateGrids
import TestScript
def testScript(self, dataMgr):
    """Run the LE_Test_Local scripts through the shared TestScript driver.

    Builds the per-suite defaults (command-line variables, grid cleanup,
    product type) and hands the module-level ``scripts`` list off to
    ``TestScript.generalTestScript``.
    """
    # Default GUI variable string applied to every script entry; individual
    # entries in ``scripts`` may override these defaults.
    cmd_line_vars = (
        "{('Product Issuance', 'productIssuance'): 'Morning', "
        "('Issuance Type', 'issuanceType'): 'ROUTINE', "
        "('Issued By', 'issuedBy'): None}"
    )
    defaults = {
        "cmdLineVars": cmd_line_vars,
        "deleteGrids": CreateGrids.Delete_grids,
        "productType": "LE_Test_Local",
    }
    return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
| 41.520388
| 189
| 0.533087
| 2,356
| 21,383
| 4.793294
| 0.11927
| 0.028779
| 0.034712
| 0.05499
| 0.831843
| 0.820066
| 0.7703
| 0.736385
| 0.72452
| 0.720181
| 0
| 0.055798
| 0.240658
| 21,383
| 514
| 190
| 41.601167
| 0.639712
| 0.060328
| 0
| 0.603982
| 0
| 0.022124
| 0.540561
| 0.051686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002212
| false
| 0
| 0.004425
| 0
| 0.00885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8369920fc0165d90314e66e5b7970c7cffdf56d6
| 106
|
py
|
Python
|
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
"""
Package to hold the Transformation Classes
"""
from . import base
from . import spend_per_department
| 15.142857
| 42
| 0.764151
| 14
| 106
| 5.642857
| 0.857143
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160377
| 106
| 6
| 43
| 17.666667
| 0.88764
| 0.396226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
55e27b739ace5413321cb8d38b36117252a799e4
| 2,564
|
py
|
Python
|
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | 14
|
2020-05-10T20:44:49.000Z
|
2022-01-12T23:06:24.000Z
|
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | null | null | null |
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
class FlowSequential(nn.Sequential):
    """Sequential container of invertible blocks.

    Every child module maps (input, context) -> (output, log_prob) and
    exposes an ``inverse`` method with the same contract; this container
    chains the blocks and accumulates the per-block log-determinant terms.
    """

    def forward(self, input, context=None):
        """Apply every block in registration order; return (output, log-det sum)."""
        accumulated = torch.zeros(input.size(0), device=input.device)
        for block in self._modules.values():
            input, step = block(input, context)
            accumulated = accumulated + step
        return input, accumulated

    def inverse(self, input, context=None):
        """Undo the blocks in reverse order via their ``inverse`` methods."""
        accumulated = torch.zeros(input.size(0), device=input.device)
        for block in reversed(list(self._modules.values())):
            input, step = block.inverse(input, context)
            accumulated = accumulated + step
        return input, accumulated
def get_memory():
    """Return (current, peak) CUDA memory allocated, in gigabytes.

    Requires a CUDA device; ``synchronize`` ensures pending kernels are
    reflected in the allocator counters before they are read.
    """
    torch.cuda.synchronize()
    peak_bytes = torch.cuda.max_memory_allocated()
    current_bytes = torch.cuda.memory_allocated()
    return current_bytes / 10**9, peak_bytes / 10**9
class RealNVPSequential(nn.Sequential):
    """Assumes first and last module are CheckerSplit and CheckerUnsplit."""

    def _partition(self):
        # The first registered module splits the input into a
        # (transformed, constant) pair and the last one re-assembles it;
        # everything in between transforms the pair.
        inner = list(self._modules.values())
        splitter = inner.pop(0)
        joiner = inner.pop()
        return splitter, joiner, inner

    def forward(self, input, context=None):
        """Split, run the inner couplings in order, re-join; return (out, log-det)."""
        splitter, joiner, inner = self._partition()
        accumulated = torch.zeros(input.size(0), device=input.device)
        transf, const = splitter(input)
        for coupling in inner:
            transf, const, step = coupling(transf, const, context)
            accumulated = accumulated + step
        return joiner(transf, const), accumulated

    def inverse(self, input, context=None):
        """Split, invert the inner couplings in reverse order, re-join."""
        splitter, joiner, inner = self._partition()
        accumulated = torch.zeros(input.size(0), device=input.device)
        transf, const = splitter(input)
        for coupling in reversed(inner):
            transf, const, step = coupling.inverse(transf, const, context)
            accumulated = accumulated + step
        return joiner(transf, const), accumulated
class SplitSequential(nn.Sequential):
    """Assumes first and last module are CheckerSplit and CheckerConcat."""

    def forward(self, transf, const, context=None):
        """Run every module over the (transf, const) pair in order."""
        accumulated = torch.zeros(transf.size(0), device=transf.device)
        for coupling in self._modules.values():
            transf, const, step = coupling(transf, const, context)
            accumulated = accumulated + step
        return transf, const, accumulated

    def inverse(self, transf, const, context=None):
        """Undo the modules in reverse order via their ``inverse`` methods."""
        accumulated = torch.zeros(transf.size(0), device=transf.device)
        for coupling in reversed(list(self._modules.values())):
            transf, const, step = coupling.inverse(transf, const, context)
            accumulated = accumulated + step
        return transf, const, accumulated
| 35.123288
| 74
| 0.710608
| 354
| 2,564
| 4.977401
| 0.166667
| 0.119183
| 0.122588
| 0.064699
| 0.820091
| 0.81101
| 0.803065
| 0.764472
| 0.739501
| 0.739501
| 0
| 0.006635
| 0.177067
| 2,564
| 72
| 75
| 35.611111
| 0.828436
| 0.071373
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12963
| false
| 0
| 0.037037
| 0
| 0.351852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
55f43053f0d67231d40b9280a1fec18d43d92658
| 169
|
py
|
Python
|
src/rlib/debug.py
|
SOM-st/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 22
|
2015-10-29T05:11:06.000Z
|
2022-03-01T11:18:45.000Z
|
src/rlib/debug.py
|
smarr/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 16
|
2021-03-07T22:09:33.000Z
|
2021-08-24T12:36:15.000Z
|
src/rlib/debug.py
|
SOM-st/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 5
|
2015-01-02T03:51:29.000Z
|
2020-10-02T07:05:46.000Z
|
# Compatibility shim: use RPython's real make_sure_not_resized hint when the
# RPython toolchain is importable, otherwise fall back to a no-op so the same
# code also runs under plain CPython.
try:
    from rpython.rlib.debug import make_sure_not_resized  # pylint: disable=W
except ImportError:
    # The bare "NOT_RPYTHON" string is an RPython translation marker — do not
    # remove it.
    "NOT_RPYTHON"
    def make_sure_not_resized(_):
        # No-op fallback; the resize hint only matters when translated.
        pass
| 21.125
| 77
| 0.715976
| 23
| 169
| 4.913043
| 0.73913
| 0.141593
| 0.19469
| 0.318584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213018
| 169
| 7
| 78
| 24.142857
| 0.849624
| 0.100592
| 0
| 0
| 0
| 0
| 0.073333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
36011f50763e2763762534e112d2a7cea6f3af2e
| 65
|
py
|
Python
|
experiments/archived/20210203/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 5
|
2020-10-14T02:30:44.000Z
|
2021-05-06T12:48:28.000Z
|
experiments/archived/20210119/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 2
|
2020-12-19T05:59:31.000Z
|
2020-12-22T11:05:31.000Z
|
experiments/archived/20210203/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | null | null | null |
from .hub_interface import * # noqa
from .model import * # noqa
| 32.5
| 36
| 0.707692
| 9
| 65
| 5
| 0.666667
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 65
| 2
| 37
| 32.5
| 0.865385
| 0.138462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
369f3934be836b3619a596d326601ac157eae3f4
| 2,344
|
py
|
Python
|
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | 2
|
2020-03-15T11:39:18.000Z
|
2021-12-05T20:38:48.000Z
|
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | null | null | null |
eternalghost.py
|
awareseven/eternalghosttest
|
989dafac06b72af21e1cd7103c92ec6b399e5133
|
[
"MIT"
] | 2
|
2020-03-18T20:21:37.000Z
|
2020-10-13T09:19:14.000Z
|
# Probe a remote host's SMB negotiate response to check for exposure to
# CVE-2020-0796 ("SMBGhost", SMBv3.1.1 compression flaw).
import socket
import struct
import sys
banner = """
 _ _ _ _
| | | | | | | |
 ___| |_ ___ _ __ _ __ __ _| | __ _| |__ ___ ___| |_
 / _ \ __/ _ \ '__| '_ \ / _` | |/ _` | '_ \ / _ \/ __| __|
| __/ || __/ | | | | | (_| | | (_| | | | | (_) \__ \ |_
 \___|\__\___|_| |_| |_|\__,_|_|\__, |_| |_|\___/|___/\__|
 __/ |
 |___/
\t\t\t\t\tby AWARE7 GmbH
"""
print(banner)
# Require exactly one argument: the target IP address / hostname.
if len(sys.argv) < 2:
    print("Not enough Arguments")
    print("python3 scanner.py <IP-Address>")
    sys.exit()
# Connection-Handle for SMB Handshake
# Pre-built SMB2 NEGOTIATE request offering dialects up to 3.1.1 with a
# compression capability context.
pkt = b'\x00\x00\x00\xc0\xfeSMB@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x08\x00\x01\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x00\x00\x00\x02\x00\x00\x00\x02\x02\x10\x02"\x02$\x02\x00\x03\x02\x03\x10\x03\x11\x03\x00\x00\x00\x00\x01\x00&\x00\x00\x00\x00\x00\x01\x00 \x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\n\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'
# Generate a Socket
sock = socket.socket(socket.AF_INET)
sock.settimeout(3)
# Get Hostname
hostname = sys.argv[1]
# Connect to Host
# NOTE(review): no error handling — an unreachable host raises an uncaught
# socket error here.
print("Scanning System: {}\r\n".format(hostname))
sock.connect(( hostname, 445 ))
# Send Handshake
sock.send(pkt)
# Receive Handshake
# NetBIOS session framing: the first 4 bytes are a big-endian length prefix
# for the SMB payload that follows.
nb, = struct.unpack(">I", sock.recv(4))
res = sock.recv(nb)
# Check if SMB Version 3_11 is used
# NOTE(review): offsets 68:70 / 70:72 assume a fixed negotiate-response
# layout (dialect revision / compression context) — confirm against MS-SMB2.
if not res[68:70] == b"\x11\x03":
    print("\tYour System {} doesn't use the latest SMB Version. This is insecure as well but you are not effected by CVE-2020-0796".format(hostname))
    sys.exit(1)
# Check if uses Compression
if not res[70:72] == b"\x02\x00":
    print("\tYour System {} is not vulnearble to CVE-2020-0796".format(hostname))
    sys.exit(1)
print("\tYour System {} is vulnearble to CVE-2020-0796".format(hostname))
# NOTE(review): exits with status 1 on the "vulnerable" path as well —
# presumably intentional (non-zero signals a finding); confirm.
sys.exit(1)
| 45.076923
| 761
| 0.593857
| 362
| 2,344
| 3.593923
| 0.267956
| 0.641045
| 0.857802
| 1.014604
| 0.447348
| 0.435819
| 0.435819
| 0.435819
| 0.40123
| 0.380477
| 0
| 0.228509
| 0.21587
| 2,344
| 52
| 762
| 45.076923
| 0.479325
| 0.074659
| 0
| 0.083333
| 0
| 0.166667
| 0.742593
| 0.347685
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.194444
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36c9545921e82accc771994b4028870845e16cb0
| 19,349
|
py
|
Python
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 183
|
2015-11-25T15:13:47.000Z
|
2022-01-07T23:02:36.000Z
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 198
|
2015-11-24T16:48:48.000Z
|
2020-10-26T10:38:56.000Z
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 65
|
2015-12-03T21:29:38.000Z
|
2021-08-10T20:03:49.000Z
|
import csv
import sys
import json
import tests
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from six import with_metaclass
from elex.cli.app import ElexApp
from collections import OrderedDict
# Canned AP election API fixture files used by the CLI tests below.
DATA_FILE = 'tests/data/20151103_national.json'
# Election date matching DATA_FILE.
DATA_ELECTION_DATE = '2015-11-03'
DELSUM_DATA_FILE = 'tests/data/20160118_delsum.json'
DELSUPER_DATA_FILE = 'tests/data/20160118_delsuper.json'
ELECTIONS_DATA_FILE = 'tests/data/00000000_elections.json'
DISTRICT_DATA_FILE = 'tests/data/20160201_district_results.json'
# `elex` sub-commands for which fields/length/data (plus timestamp and
# batch-name) tests are generated dynamically by the metaclasses below.
TEST_COMMANDS = [
    'races',
    'candidates',
    'reporting-units',
    'candidate-reporting-units',
    'results',
]
class ElexCLICSVTestMeta(type):
    """Metaclass that generates one CSV CLI test of each kind per command.

    For every command in TEST_COMMANDS it attaches fields / length / data
    tests plus timestamp and batch-name variants to the class under
    creation, comparing the CLI's CSV output against the Python API.
    """

    def __new__(mcs, name, bases, dict):
        def gen_fields_test(command):
            """
            Dynamically generate a fields test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                api_fields = api_data[0].serialize().keys()
                self.assertEqual(cli_fields, list(api_fields))
            return test

        def gen_length_test(command):
            """
            Dynamically generate a data length test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                self.assertEqual(len(cli_data), len(api_data))
            return test

        def gen_data_test(command):
            """
            Dynamically generate a data test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                for i, row in enumerate(cli_data):
                    for k, v in api_data[i].serialize().items():
                        # CSV output renders None as the empty string, and
                        # everything else stringified.
                        if v is None:
                            v = ''
                        self.assertEqual(row[k], str(v))
            return test

        def gen_timestamp_test(command):
            """
            Generate test to ensure timestamp field is set
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          with_timestamp=True)
                self.assertEqual(cli_fields[-1], 'timestamp')
            return test

        def gen_timestamp_data_test(command):
            """
            Generate test to ensure the timestamp data is numeric
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          with_timestamp=True)
                for row in cli_data:
                    # Python 2 has unicode(); fall back to str() on Python 3.
                    try:
                        self.assertTrue(unicode(row['timestamp']).isnumeric())
                    except NameError:
                        self.assertTrue(str(row['timestamp']).isnumeric())
            return test

        def gen_batch_name_data_test(command):
            """
            Generate test to ensure every row carries the requested batch name
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          batch_name='batch-01')
                for row in cli_data:
                    self.assertEqual(row['batchname'], 'batch-01')
            return test

        # Attach one test of each kind per command to the class dict.
        for command in TEST_COMMANDS:
            fields_test_name = 'test_csv_{0}_fields'.format(
                command.replace('-', '_')
            )
            dict[fields_test_name] = gen_fields_test(command)
            length_test_name = 'test_csv_{0}_length'.format(
                command.replace('-', '_')
            )
            dict[length_test_name] = gen_length_test(command)
            data_test_name = 'test_csv_{0}_data'.format(
                command.replace('-', '_')
            )
            dict[data_test_name] = gen_data_test(command)
            timestamp_test_name = 'test_csv_{0}_timestamp'.format(
                command.replace('-', '_')
            )
            dict[timestamp_test_name] = gen_timestamp_test(command)
            timestamp_data_test_name = 'test_csv_{0}_timestamp_data'.format(
                command.replace('-', '_')
            )
            dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
            batch_name_data_test_name = 'test_csv_{0}_batch_name_data'.format(
                command.replace('-', '_')
            )
            dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
        return type.__new__(mcs, name, bases, dict)
class ElexCLICSVTestCase(
    with_metaclass(ElexCLICSVTestMeta, tests.ElectionResultsTestCase)
):
    """
    This testing class is mostly dynamically generated by its metaclass.
    The goal of the CLI tests is to make sure the CLI output matches the
    Python API. The API tests guarantee the validity of the data, while these
    tests guarantee the CLI provides the same data in CSV format.
    """

    def test_csv_elections_fields(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(
            fields,
            ['id', 'electiondate', 'liveresults', 'testresults']
        )

    def test_csv_elections_length(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(len(data), 11)

    def test_csv_elections_date(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['electiondate'], '2015-08-04')

    def test_csv_elections_liveresults(self):
        # CSV stringifies booleans, hence 'False' rather than False.
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['liveresults'], 'False')

    def test_csv_elections_testresults(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['testresults'], 'True')

    def test_csv_next_election_fields(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(
            fields,
            ['id', 'electiondate', 'liveresults', 'testresults']
        )

    def test_csv_next_election_length(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(len(data), 1)

    def test_csv_next_election_date(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['electiondate'], '2015-08-25')

    def test_csv_next_election_liveresults(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['liveresults'], 'True')

    def test_csv_next_election_testresults(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['testresults'], 'False')

    def test_csv_delegate_fields(self):
        fields, data = self._test_command(command='delegates')
        self.assertEqual(
            fields,
            [
                'level', 'party_total', 'superdelegates_count', 'last',
                'state', 'candidateid', 'party_need', 'party',
                'delegates_count', 'id', 'd1', 'd7', 'd30'
            ]
        )

    def test_csv_delegate_state_count(self):
        fields, data = self._test_command(command='delegates')
        number_of_states = list(
            set([d['state'] for d in data if d['level'] == 'state'])
        )
        self.assertEqual(58, len(number_of_states))

    def test_csv_results_resultslevel(self):
        fields, data = self._test_command(
            command='results',
            datafile=DISTRICT_DATA_FILE,
            resultslevel='district'
        )
        self.assertEqual(data[17]['reportingunitname'], 'District 1')

    def _test_command(
        self,
        command,
        datafile=DATA_FILE,
        delsum_datafile=DELSUM_DATA_FILE,
        delsuper_datafile=DELSUPER_DATA_FILE,
        electiondate=DATA_ELECTION_DATE,
        resultslevel=None,
        with_timestamp=False,
        batch_name=False
    ):
        """
        Execute an `elex` sub-command; returns fieldnames and rows
        """
        # Capture the CLI's stdout so its CSV output can be parsed back.
        stdout_backup = sys.stdout
        sys.stdout = StringIO()
        argv = [command]
        if electiondate is not None:
            argv.append(electiondate)
        argv = argv + ['--data-file', datafile]
        argv = argv + ['--delegate-sum-file', delsum_datafile]
        argv = argv + ['--delegate-super-file', delsuper_datafile]
        # NOTE(review): resultslevel may be None here, which places None in
        # argv — presumably ElexApp tolerates that; confirm.
        argv = argv + ['--results-level', resultslevel]
        if with_timestamp:
            argv = argv + ['--with-timestamp']
        if batch_name:
            argv = argv + ['--batch-name', batch_name]
        app = ElexApp(argv=argv)
        app.setup()
        app.log.set_level('FATAL')
        app.run()
        # NOTE(review): sys.stdout is not restored if app.run() raises; a
        # try/finally would be safer.
        lines = sys.stdout.getvalue().split('\n')
        reader = csv.DictReader(lines)
        sys.stdout.close()
        sys.stdout = stdout_backup
        return reader.fieldnames, list(reader)
class ElexCLIJSONTestMeta(type):
    """Metaclass that generates one JSON CLI test of each kind per command.

    Mirrors ElexCLICSVTestMeta but exercises the CLI's JSON output mode,
    so values are compared with their native types instead of strings.
    """

    def __new__(mcs, name, bases, dict):
        def gen_fields_test(command):
            """
            Dynamically generate a fields test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                api_fields = api_data[0].serialize().keys()
                self.assertEqual(cli_fields, list(api_fields))
            return test

        def gen_length_test(command):
            """
            Dynamically generate a data length test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                self.assertEqual(len(cli_data), len(api_data))
            return test

        def gen_data_test(command):
            """
            Dynamically generate a data test
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command)
                api_data = getattr(self, command.replace('-', '_'))
                for i, row in enumerate(cli_data):
                    # JSON preserves types, so compare values directly
                    # (the CSV tests stringify instead).
                    for k, v in api_data[i].serialize().items():
                        self.assertEqual(row[k], v)
            return test

        def gen_timestamp_test(command):
            """
            Generate test to ensure timestamp field is set
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          with_timestamp=True)
                self.assertEqual(cli_fields[-1], 'timestamp')
            return test

        def gen_timestamp_data_test(command):
            """
            Generate test to ensure timestamp data is an integer
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          with_timestamp=True)
                for row in cli_data:
                    # Python 2 has unicode(); fall back to str() on Python 3.
                    try:
                        self.assertTrue(unicode(row['timestamp']).isnumeric())
                    except NameError:
                        self.assertTrue(str(row['timestamp']).isnumeric())
            return test

        def gen_batch_name_data_test(command):
            """
            Generate test to ensure every row carries the requested batch name
            """
            def test(self):
                cli_fields, cli_data = self._test_command(command=command,
                                                          batch_name='batch-01')
                for row in cli_data:
                    self.assertEqual(row['batchname'], 'batch-01')
            return test

        # Attach one test of each kind per command to the class dict.
        for command in TEST_COMMANDS:
            fields_test_name = 'test_json_{0}_fields'.format(
                command.replace('-', '_')
            )
            dict[fields_test_name] = gen_fields_test(command)
            length_test_name = 'test_json_{0}_length'.format(
                command.replace('-', '_')
            )
            dict[length_test_name] = gen_length_test(command)
            data_test_name = 'test_json_{0}_data'.format(
                command.replace('-', '_')
            )
            dict[data_test_name] = gen_data_test(command)
            # Distinct local names for the two timestamp tests (previously a
            # single variable was reassigned, obscuring which key was which).
            timestamp_test_name = 'test_json_{0}_data_timestamp'.format(
                command.replace('-', '_')
            )
            dict[timestamp_test_name] = gen_timestamp_test(command)
            timestamp_data_test_name = 'test_json_{0}_timestamp_data'.format(
                command.replace('-', '_')
            )
            dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
            # Fixed: these JSON tests were previously registered under a
            # 'test_csv_' prefix, mislabeling them as CSV tests.
            batch_name_data_test_name = 'test_json_{0}_batch_name_data'.format(
                command.replace('-', '_')
            )
            dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
        return type.__new__(mcs, name, bases, dict)
class ElexCLIJSONTestCase(
    with_metaclass(ElexCLIJSONTestMeta, tests.ElectionResultsTestCase)
):
    """
    This testing class is mostly dynamically generated by its metaclass.
    The goal of the CLI tests is to make sure the CLI output matches the
    Python API. The API tests guarantee the validity of the data, while these
    tests guarantee the CLI provides the same data in JSON format.
    """

    def test_json_elections_fields(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(
            fields,
            ['id', 'electiondate', 'liveresults', 'testresults']
        )

    def test_json_elections_length(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(len(data), 11)

    def test_json_elections_date(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['electiondate'], '2015-08-04')

    def test_json_elections_liveresults(self):
        # JSON preserves native booleans (compare the CSV variant, which
        # asserts the string 'False').
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['liveresults'], False)

    def test_json_elections_testresults(self):
        fields, data = self._test_command(
            command='elections',
            datafile=ELECTIONS_DATA_FILE
        )
        self.assertEqual(data[4]['testresults'], True)

    def test_json_next_election_fields(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(
            fields,
            ['id', 'electiondate', 'liveresults', 'testresults']
        )

    def test_json_next_election_length(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(len(data), 1)

    def test_json_next_election_date(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['electiondate'], '2015-08-25')

    def test_json_next_election_liveresults(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['liveresults'], True)

    def test_json_next_election_testresults(self):
        fields, data = self._test_command(
            command='next-election',
            datafile=ELECTIONS_DATA_FILE,
            electiondate='2015-08-04'
        )
        self.assertEqual(data[0]['testresults'], False)

    def test_json_delegate_fields(self):
        fields, data = self._test_command(command='delegates')
        self.assertEqual(
            fields,
            [
                'level', 'party_total', 'superdelegates_count', 'last',
                'state', 'candidateid', 'party_need', 'party',
                'delegates_count', 'id', 'd1', 'd7', 'd30'
            ]
        )

    def test_json_delegate_state_count(self):
        fields, data = self._test_command(command='delegates')
        number_of_states = list(
            set([d['state'] for d in data if d['level'] == 'state'])
        )
        self.assertEqual(58, len(number_of_states))

    def test_json_results_resultslevel(self):
        fields, data = self._test_command(
            command='results',
            datafile=DISTRICT_DATA_FILE,
            resultslevel='district'
        )
        self.assertEqual(data[17]['reportingunitname'], 'District 1')

    def _test_command(
        self,
        command,
        datafile=DATA_FILE,
        delsum_datafile=DELSUM_DATA_FILE,
        delsuper_datafile=DELSUPER_DATA_FILE,
        electiondate=DATA_ELECTION_DATE,
        resultslevel=None,
        with_timestamp=False,
        batch_name=False
    ):
        """
        Execute an `elex` sub-command; returns fieldnames and rows
        """
        # Capture the CLI's stdout so its JSON output can be parsed back.
        stdout_backup = sys.stdout
        sys.stdout = StringIO()
        argv = [command]
        # Fixed: guard against None like the CSV variant does, instead of
        # unconditionally appending (which put None into argv).
        if electiondate is not None:
            argv.append(electiondate)
        argv = argv + ['--data-file', datafile, '-o', 'json']
        argv = argv + ['--delegate-sum-file', delsum_datafile]
        argv = argv + ['--delegate-super-file', delsuper_datafile]
        argv = argv + ['--results-level', resultslevel]
        if with_timestamp:
            argv = argv + ['--with-timestamp']
        if batch_name:
            argv = argv + ['--batch-name', batch_name]
        app = ElexApp(argv=argv)
        app.setup()
        app.log.set_level('FATAL')
        app.run()
        json_data = sys.stdout.getvalue()
        # OrderedDict keeps the CLI's field order for the fields assertion.
        data = json.loads(json_data, object_pairs_hook=OrderedDict)
        sys.stdout.close()
        sys.stdout = stdout_backup
        return list(data[0].keys()), data
| 33.826923
| 80
| 0.567213
| 2,008
| 19,349
| 5.190737
| 0.092629
| 0.067543
| 0.043749
| 0.06927
| 0.909047
| 0.901564
| 0.893217
| 0.892449
| 0.875756
| 0.873357
| 0
| 0.017043
| 0.329836
| 19,349
| 571
| 81
| 33.886165
| 0.786766
| 0.060985
| 0
| 0.687646
| 0
| 0
| 0.105222
| 0.022604
| 0
| 0
| 0
| 0
| 0.09324
| 1
| 0.125874
| false
| 0
| 0.02331
| 0
| 0.195804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36eb37aac32d06e68b8f0f03ae15c8cd3b04fb1f
| 49
|
py
|
Python
|
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
from trees.dasgupta.costtree import DasguptaTree
| 24.5
| 48
| 0.877551
| 6
| 49
| 7.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7fe7fde051fa8a3d76d968e9a6574579dd014181
| 152
|
py
|
Python
|
exercises/01_Primeiros Passos/exe_08.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | 1
|
2021-05-11T18:30:17.000Z
|
2021-05-11T18:30:17.000Z
|
exercises/01_Primeiros Passos/exe_08.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | null | null | null |
exercises/01_Primeiros Passos/exe_08.py
|
MariaTrindade/CursoPython
|
2c60dd670747db08011d9dd33e3bbfd5795b06e8
|
[
"Apache-2.0"
] | null | null | null |
"""
Faça um Programa que peça a temperatura em graus Fahrenheit, transforme e mostre
a temperatura em graus Celsius.
C = (5 * (F-32) / 9)
"""
| 9.5
| 80
| 0.651316
| 23
| 152
| 4.304348
| 0.826087
| 0.242424
| 0.282828
| 0.383838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.243421
| 152
| 15
| 81
| 10.133333
| 0.826087
| 0.881579
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7fec3044100d2f06c27146cd462ed08cea1a54d2
| 201
|
py
|
Python
|
utils/compilers/ConnectionCompiler/token.py
|
pranaOS-bot/pranaOS-1
|
ddb8086d103d004f84744641624e74fc7ec0984e
|
[
"BSD-2-Clause"
] | 5
|
2021-10-06T13:47:26.000Z
|
2022-03-24T10:42:06.000Z
|
utils/compilers/ConnectionCompiler/token.py
|
evilbat831/brutalOS
|
85920a6a95d564320a245a2e48ffc7cdf64ede84
|
[
"BSD-2-Clause"
] | null | null | null |
utils/compilers/ConnectionCompiler/token.py
|
evilbat831/brutalOS
|
85920a6a95d564320a245a2e48ffc7cdf64ede84
|
[
"BSD-2-Clause"
] | 1
|
2021-10-18T12:48:16.000Z
|
2021-10-18T12:48:16.000Z
|
class Token:
    """A lexer token: a type tag plus the associated literal value."""

    def __init__(self, type=None, value=None):
        # The parameter name `type` shadows the builtin, but is kept for
        # keyword-argument compatibility with existing callers.
        self.type, self.value = type, value

    def __str__(self):
        """Human-readable form, e.g. Token(INTEGER, 3)."""
        return "Token({0}, {1})".format(self.type, self.value)
| 20.1
| 62
| 0.587065
| 27
| 201
| 4.074074
| 0.481481
| 0.218182
| 0.236364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.263682
| 201
| 10
| 62
| 20.1
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3d0113714f49189583df2b472f9f7bb1b7d3193b
| 117
|
py
|
Python
|
aficionado/defaults.py
|
SamuelHornsey/aficionado
|
27654028ede3d719b091dd61f5c8d252f631a316
|
[
"MIT"
] | 1
|
2019-11-27T21:58:10.000Z
|
2019-11-27T21:58:10.000Z
|
aficionado/defaults.py
|
SamuelHornsey/aficionado
|
27654028ede3d719b091dd61f5c8d252f631a316
|
[
"MIT"
] | null | null | null |
aficionado/defaults.py
|
SamuelHornsey/aficionado
|
27654028ede3d719b091dd61f5c8d252f631a316
|
[
"MIT"
] | null | null | null |
def not_found_handler():
    """Default handler body returned when no route matches the path."""
    message = '404. Path not found'
    return message
def internal_error_handler():
    """Default handler body returned when request handling raises."""
    message = '500. Internal error'
    return message
| 23.4
| 30
| 0.752137
| 17
| 117
| 4.941176
| 0.588235
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06
| 0.145299
| 117
| 5
| 31
| 23.4
| 0.78
| 0
| 0
| 0
| 0
| 0
| 0.322034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3d06f699f338062bc96644c815234c6952e6bcf8
| 1,136
|
py
|
Python
|
libary/yml_wrapper.py
|
NekoFanatic/kaiji
|
7ae8e12d4e821e7d28d78034e1ec044ed75f9536
|
[
"MIT"
] | null | null | null |
libary/yml_wrapper.py
|
NekoFanatic/kaiji
|
7ae8e12d4e821e7d28d78034e1ec044ed75f9536
|
[
"MIT"
] | null | null | null |
libary/yml_wrapper.py
|
NekoFanatic/kaiji
|
7ae8e12d4e821e7d28d78034e1ec044ed75f9536
|
[
"MIT"
] | null | null | null |
from typing import Union
import yaml
class ConfigReader:
    """Attribute-style reader for ``config.yml``.

    Underscore-separated attribute names are resolved as nested keys:
    ``reader.db_host`` looks up ``data["db"]["host"]``.  Consequently the
    YAML keys themselves must not contain underscores.
    """

    def __init__(self):
        # Loaded once at construction; config.yml must exist in the CWD.
        with open("config.yml", "r") as f:
            data = yaml.safe_load(f)
        self.data = data

    def __getattr__(self, __name: str):
        """Resolve a missing attribute as a nested lookup into the YAML data.

        Raises:
            AttributeError: if any segment of the path is absent.
        """
        data = self.data
        try:
            for key in __name.split("_"):
                data = data[key]
            return data
        except KeyError as err:
            # Raise AttributeError (rather than a bare Exception) so that
            # hasattr()/getattr(..., default) and the copy/pickle protocols
            # behave correctly; callers catching Exception still catch it.
            raise AttributeError("Can't find object") from err
class TextReader:
    """Attribute-style reader for ``text.yml``.

    Underscore-separated names resolve as nested keys, either through
    attribute access (``reader.errors_notfound``) or via :meth:`find` for
    names that are not valid Python identifiers.  YAML keys therefore must
    not contain underscores.
    """

    def __init__(self):
        # Loaded once at construction; text.yml must exist in the CWD.
        with open("text.yml", "r") as f:
            data = yaml.safe_load(f)
        self.data = data

    def __getattr__(self, __name: str):
        """Resolve a missing attribute as a nested lookup into the YAML data.

        Raises:
            AttributeError: if any segment of the path is absent.
        """
        data = self.data
        try:
            for key in __name.split("_"):
                data = data[key]
            return data
        except KeyError as err:
            # AttributeError keeps hasattr()/getattr(..., default) and the
            # copy/pickle protocols working; callers catching Exception
            # still catch it.
            raise AttributeError("Can't find object") from err

    def find(self, string: str) -> Union[str, list]:
        """Look up an underscore-separated path; same semantics as attribute
        access.  Delegates to __getattr__ (the logic was duplicated before)."""
        return self.__getattr__(string)
| 23.183673
| 52
| 0.49912
| 136
| 1,136
| 3.955882
| 0.308824
| 0.074349
| 0.072491
| 0.094796
| 0.791822
| 0.72119
| 0.72119
| 0.72119
| 0.72119
| 0.72119
| 0
| 0
| 0.399648
| 1,136
| 48
| 53
| 23.666667
| 0.788856
| 0
| 0
| 0.794872
| 0
| 0
| 0.065141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d224cb8121fbd91cf794debf39fda90674c7943
| 82
|
py
|
Python
|
technews/__init__.py
|
WisChang005/technews_watcher
|
454ef30bab7731c629f0e3b577ce340c48a6cbe7
|
[
"MIT"
] | 1
|
2019-03-31T15:34:10.000Z
|
2019-03-31T15:34:10.000Z
|
technews/__init__.py
|
WisChang005/technews_watcher
|
454ef30bab7731c629f0e3b577ce340c48a6cbe7
|
[
"MIT"
] | null | null | null |
technews/__init__.py
|
WisChang005/technews_watcher
|
454ef30bab7731c629f0e3b577ce340c48a6cbe7
|
[
"MIT"
] | null | null | null |
from .technews_helper import TechNews
from .mail_helper import EmailContentHelper
| 27.333333
| 43
| 0.878049
| 10
| 82
| 7
| 0.6
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 82
| 2
| 44
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d25c2e6e29e6e78df3ddd62294d2447deebe52c
| 28
|
py
|
Python
|
aoc_tools/__init__.py
|
dannyboywoop/AOC_Tools
|
b47374ae465c5772d7b4c09f40eb6e69d68cc144
|
[
"MIT"
] | null | null | null |
aoc_tools/__init__.py
|
dannyboywoop/AOC_Tools
|
b47374ae465c5772d7b4c09f40eb6e69d68cc144
|
[
"MIT"
] | null | null | null |
aoc_tools/__init__.py
|
dannyboywoop/AOC_Tools
|
b47374ae465c5772d7b4c09f40eb6e69d68cc144
|
[
"MIT"
] | null | null | null |
from ._advent_timer import *
| 28
| 28
| 0.821429
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d2ce2c966a31e97ee5b7a66b2aeabb6f1778574
| 35
|
py
|
Python
|
arcpyext/mapping/_cim/__init__.py
|
PeterReyne/arcpyext
|
9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3
|
[
"BSD-3-Clause"
] | 11
|
2015-05-01T04:08:30.000Z
|
2019-09-21T05:00:58.000Z
|
arcpyext/mapping/_cim/__init__.py
|
PeterReyne/arcpyext
|
9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3
|
[
"BSD-3-Clause"
] | 14
|
2015-06-23T02:46:44.000Z
|
2019-10-11T00:46:11.000Z
|
arcpyext/mapping/_cim/__init__.py
|
PeterReyne/arcpyext
|
9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3
|
[
"BSD-3-Clause"
] | 9
|
2015-02-27T05:25:42.000Z
|
2020-01-19T05:43:14.000Z
|
from .pro_project import ProProject
| 35
| 35
| 0.885714
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d30c11f1ede17efd698bce52b1da5e9569d559a
| 456
|
py
|
Python
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 5
|
2019-01-19T23:53:35.000Z
|
2022-01-29T14:04:31.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 4
|
2020-09-26T01:30:01.000Z
|
2022-02-10T02:20:35.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 7
|
2020-03-04T22:23:51.000Z
|
2021-07-13T14:05:46.000Z
|
import pytest
from markov.tests import test_constant
@pytest.fixture
def aws_region():
    """Yield the AWS region used by the markov test suite."""
    region = test_constant.AWS_REGION
    return region
@pytest.fixture
def model_metadata_s3_key():
    """Yield the S3 key of the model metadata fixture."""
    key = test_constant.MODEL_METADATA_S3_KEY
    return key
@pytest.fixture
def reward_function_s3_source():
    """Yield the S3 source location of the reward function fixture."""
    source = test_constant.REWARD_FUNCTION_S3_SOURCE
    return source
@pytest.fixture
def s3_bucket():
    """Yield the S3 bucket name used by the tests."""
    bucket = test_constant.S3_BUCKET
    return bucket
@pytest.fixture
def s3_prefix():
    """Yield the S3 key prefix used by the tests."""
    prefix = test_constant.S3_PREFIX
    return prefix
| 19.826087
| 50
| 0.809211
| 66
| 456
| 5.227273
| 0.318182
| 0.208696
| 0.231884
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.122807
| 456
| 22
| 51
| 20.727273
| 0.8425
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| true
| 0
| 0.117647
| 0.294118
| 0.705882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3d524c3bd35810437426c4644ee0f769511b58ea
| 152
|
py
|
Python
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 256
|
2017-06-27T02:37:21.000Z
|
2022-03-28T07:51:48.000Z
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 379
|
2017-06-25T05:49:14.000Z
|
2022-03-29T18:57:11.000Z
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 113
|
2017-06-25T14:07:05.000Z
|
2022-03-30T09:10:12.000Z
|
import iota_client

# Connect using the client library's default node configuration.
client = iota_client.Client()
# Fetch and print a single UTXO by its output id (hex string).
# NOTE(review): the id is hard-coded sample data for this example script.
print(
    client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
)
| 25.333333
| 93
| 0.848684
| 12
| 152
| 10.5
| 0.583333
| 0.15873
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.264286
| 0.078947
| 152
| 6
| 94
| 25.333333
| 0.635714
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d56d13a865c0fd22d417834c65ef6529f433ba4
| 104
|
py
|
Python
|
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
# Exponent (scientific) notation for float literals.
# A literal with an 'e' exponent always produces a float, even for 1e9.
a = 1e9
print(a)  # 1000000000.0
a = 7.525e2
print(a)  # 752.5
a = 3954e-3
print(a)  # 3.954
| 10.4
| 24
| 0.576923
| 22
| 104
| 2.727273
| 0.636364
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.397436
| 0.25
| 104
| 9
| 25
| 11.555556
| 0.371795
| 0.298077
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
3d582b494cb98544a7b8b83f15184b7f8c7c6d2b
| 43
|
py
|
Python
|
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
# NOTE(review): json and re are imported but unused here — presumably
# placeholders for DDL-parsing tests this file is meant to hold; confirm
# before removing.
import json
import re

# Smoke-test placeholder output.
print("Hello world")
| 10.75
| 20
| 0.767442
| 7
| 43
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 4
| 20
| 10.75
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
180f8229eeb538cba11111f51d0cfaabcfe979dc
| 14,002
|
py
|
Python
|
test.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | 1
|
2022-03-25T06:48:16.000Z
|
2022-03-25T06:48:16.000Z
|
test.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | null | null | null |
test.py
|
gmberton/deep-visual-geo-localization-benchmark
|
7ac395411b7eeff99da66675dedc5372839e5632
|
[
"MIT"
] | null | null | null |
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
def test_efficient_ram_usage(args, eval_ds, model, test_method="hard_resize"):
    """This function gives the same output as test(), but uses much less RAM.
    This can be useful when testing with large descriptors (e.g. NetVLAD) on large datasets (e.g. San Francisco).
    Obviously it is slower than test(), and can't be used with PCA.
    """
    model = model.eval()
    # One distance row per query; x5 when each query contributes 5 crops.
    if test_method == 'nearest_crop' or test_method == "maj_voting":
        distances = np.empty([eval_ds.queries_num * 5, eval_ds.database_num], dtype=np.float32)
    else:
        distances = np.empty([eval_ds.queries_num, eval_ds.database_num], dtype=np.float32)
    with torch.no_grad():
        if test_method == 'nearest_crop' or test_method == 'maj_voting':
            queries_features = np.ones((eval_ds.queries_num * 5, args.features_dim), dtype="float32")
        else:
            queries_features = np.ones((eval_ds.queries_num, args.features_dim), dtype="float32")
        logging.debug("Extracting queries features for evaluation/testing")
        # single_query needs batch size 1 because query resolutions may differ.
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        # Queries occupy dataset indices [database_num, database_num + queries_num).
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            if test_method == "nearest_crop" or test_method == 'maj_voting':
                # Each query stores 5 consecutive rows (one per crop).
                start_idx = (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                queries_features[indices, :] = features.cpu().numpy()
            else:
                queries_features[indices.numpy()-eval_ds.database_num, :] = features.cpu().numpy()
        # NOTE(review): .cuda() assumes a GPU is available regardless of
        # args.device — confirm intended for this code path.
        queries_features = torch.tensor(queries_features).type(torch.float32).cuda()
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            inputs = inputs.to(args.device)
            features = model(inputs)
            # Fill one distance column per database image: squared L2 from
            # every query feature to this image's feature.
            for pn, (index, pred_feature) in enumerate(zip(indices, features)):
                distances[:, index] = ((queries_features-pred_feature)**2).sum(1).cpu().numpy()
        del features, queries_features, pred_feature
    # Keep only the max(recall_values) closest database indices per query row.
    predictions = distances.argsort(axis=1)[:, :max(args.recall_values)]
    if test_method == 'nearest_crop':
        # Re-gather the distances of the kept predictions, then merge the
        # 5 crop rows of each query into a single ranked list.
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each
    elif test_method == 'maj_voting':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    del distances
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            # A hit at rank n counts for every larger recall threshold too.
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def test(args, eval_ds, model, test_method="hard_resize", pca=None):
    """Compute features of the given dataset and compute the recalls."""
    assert test_method in ["hard_resize", "single_query", "central_crop", "five_crops",
                           "nearest_crop", "maj_voting"], f"test_method can't be {test_method}"
    # Delegate to the low-RAM variant when requested (no PCA support there).
    if args.efficient_ram_testing:
        return test_efficient_ram_usage(args, eval_ds, model, test_method)
    model = model.eval()
    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device=="cuda"))
        # Database rows come first; queries follow (x5 rows per query when
        # every query contributes 5 crops).
        if test_method == "nearest_crop" or test_method == 'maj_voting':
            all_features = np.empty((5 * eval_ds.queries_num + eval_ds.database_num, args.features_dim), dtype="float32")
        else:
            all_features = np.empty((len(eval_ds), args.features_dim), dtype="float32")
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            features = model(inputs.to(args.device))
            features = features.cpu().numpy()
            if pca != None:
                features = pca.transform(features)
            all_features[indices.numpy(), :] = features
        logging.debug("Extracting queries features for evaluation/testing")
        # single_query needs batch size 1 because query resolutions may differ.
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            features = features.cpu().numpy()
            if pca != None:
                features = pca.transform(features)
            if test_method == "nearest_crop" or test_method == 'maj_voting':  # store the features of all 5 crops
                start_idx = eval_ds.database_num + (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                all_features[indices, :] = features
            else:
                all_features[indices.numpy(), :] = features
    queries_features = all_features[eval_ds.database_num:]
    database_features = all_features[:eval_ds.database_num]
    # Exact (brute-force) L2 nearest-neighbour search over database features.
    faiss_index = faiss.IndexFlatL2(args.features_dim)
    faiss_index.add(database_features)
    del database_features, all_features
    logging.debug("Calculating recalls")
    distances, predictions = faiss_index.search(queries_features, max(args.recall_values))
    if test_method == 'nearest_crop':
        # Merge the 5 crop rows of each query into a single ranked list.
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each query
    elif test_method == 'maj_voting':
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            # A hit at rank n counts for every larger recall threshold too.
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    # Divide by the number of queries*100, so the recalls are in percentages
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def top_n_voting(topn, predictions, distances, maj_weight):
    """Down-weight distances of predictions repeated across crops (in-place).

    Args:
        topn: one of 'top1', 'top5', 'top10' — which leading columns to vote on.
        predictions: int array of shape (crops, k) of predicted indices.
        distances: float array of the same shape; modified in-place.
        maj_weight: scaling factor for the subtracted vote bonus.

    Raises:
        ValueError: if ``topn`` is not one of the three supported values.
        (Bug fix: the original fell through with ``n``/``selected`` unbound,
        producing a confusing NameError instead.)
    """
    if topn == 'top1':
        n = 1
        selected = 0
    elif topn == 'top5':
        n = 5
        selected = slice(0, 5)
    elif topn == 'top10':
        n = 10
        selected = slice(0, 10)
    else:
        raise ValueError(f"topn must be 'top1', 'top5' or 'top10', got {topn!r}")
    # find predictions that repeat in the first, first five,
    # or first ten columns for each crop
    vals, counts = np.unique(predictions[:, selected], return_counts=True)
    # for each prediction that repeats more than once,
    # subtract from its score (lower distance == better rank)
    for val, count in zip(vals[counts > 1], counts[counts > 1]):
        mask = (predictions[:, selected] == val)
        distances[:, selected][mask] -= maj_weight * count/n
| 54.909804
| 121
| 0.644337
| 1,845
| 14,002
| 4.700813
| 0.137669
| 0.03459
| 0.031477
| 0.038741
| 0.798801
| 0.784388
| 0.770552
| 0.746224
| 0.724893
| 0.716361
| 0
| 0.018448
| 0.252821
| 14,002
| 255
| 122
| 54.909804
| 0.810552
| 0.17676
| 0
| 0.701087
| 0
| 0
| 0.065567
| 0
| 0
| 0
| 0
| 0
| 0.005435
| 1
| 0.016304
| false
| 0
| 0.038043
| 0
| 0.070652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18173f17dd015c09e3b1cfc44c736b20bfea7170
| 126
|
py
|
Python
|
ppa-mirror/config.py
|
elprup/ppa-mirror
|
29e8a5027bbb698fcb36a250484b08ea945f65cf
|
[
"MIT"
] | null | null | null |
ppa-mirror/config.py
|
elprup/ppa-mirror
|
29e8a5027bbb698fcb36a250484b08ea945f65cf
|
[
"MIT"
] | null | null | null |
ppa-mirror/config.py
|
elprup/ppa-mirror
|
29e8a5027bbb698fcb36a250484b08ea945f65cf
|
[
"MIT"
] | 1
|
2021-03-04T13:43:34.000Z
|
2021-03-04T13:43:34.000Z
|
# Local filesystem paths: where downloaded packages are cached and where
# the mirror repository tree is built.
cache_root = '/home/ubuntu/ppa-mirror/cache/'
mirror_root = '/home/ubuntu/ppa-mirror/repo'
# HTTP proxy as "host:port" used for fetching — NOTE(review): hard-coded
# external IP; confirm it is still valid before deploying.
http_proxy = "188.112.194.222:8080"
| 42
| 45
| 0.746032
| 21
| 126
| 4.333333
| 0.666667
| 0.175824
| 0.307692
| 0.373626
| 0.505495
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 0.063492
| 126
| 3
| 46
| 42
| 0.635593
| 0
| 0
| 0
| 0
| 0
| 0.614173
| 0.456693
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18444ea5a0cd3e04e2706a71502de539bb9fa0dc
| 1,709
|
py
|
Python
|
python/tests/test_tree_intersection.py
|
Yonatan1P/data-structures-and-algorithms
|
ddd647d52a3182ca01032bfdb72f94ea22a0e76b
|
[
"MIT"
] | 1
|
2020-12-16T22:38:12.000Z
|
2020-12-16T22:38:12.000Z
|
python/tests/test_tree_intersection.py
|
Yonatan1P/data-structures-and-algorithms
|
ddd647d52a3182ca01032bfdb72f94ea22a0e76b
|
[
"MIT"
] | 1
|
2020-11-14T05:37:48.000Z
|
2020-11-14T05:37:48.000Z
|
python/tests/test_tree_intersection.py
|
Yonatan1P/data-structures-and-algorithms
|
ddd647d52a3182ca01032bfdb72f94ea22a0e76b
|
[
"MIT"
] | null | null | null |
from challenges.tree_intersection.tree_intersection import find_intersection
from challenges.tree.tree import BinarySearchTree
def test_find_intersection():
    """Two trees sharing only 7 and 8 should intersect on exactly [7, 8]."""
    tree1 = BinarySearchTree()
    for value in (1, 2, 3, 4, 5, 6, 7, 8):
        tree1.add(value)
    tree2 = BinarySearchTree()
    # 12 is inserted twice, mirroring the original fixture exactly.
    for value in (12, 12, 13, 14, 15, 16, 7, 8):
        tree2.add(value)
    assert find_intersection(tree1, tree2) == [7, 8]
def test_empty_binary_tree():
    """An empty second tree yields an empty intersection."""
    tree1 = BinarySearchTree()
    for value in range(1, 9):
        tree1.add(value)
    tree2 = BinarySearchTree()
    assert find_intersection(tree1, tree2) == []
def test_first_empty_binary_tree():
    """An empty first tree yields an empty intersection."""
    tree2 = BinarySearchTree()
    for value in range(1, 9):
        tree2.add(value)
    tree1 = BinarySearchTree()
    assert find_intersection(tree1, tree2) == []
def test_same_tree():
    """Identical trees intersect on every stored value."""
    tree1 = BinarySearchTree()
    for value in range(1, 9):
        tree1.add(value)
    tree2 = BinarySearchTree()
    for value in range(1, 9):
        tree2.add(value)
    assert find_intersection(tree1, tree2) == [1, 2, 3, 4, 5, 6, 7, 8]
| 21.632911
| 76
| 0.627853
| 240
| 1,709
| 4.395833
| 0.133333
| 0.181991
| 0.099526
| 0.10237
| 0.798104
| 0.730806
| 0.730806
| 0.730806
| 0.730806
| 0.730806
| 0
| 0.098386
| 0.238736
| 1,709
| 78
| 77
| 21.910256
| 0.712529
| 0
| 0
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.054054
| false
| 0
| 0.027027
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a101053cd887c912399a70d0a235e2cfdc45a962
| 34
|
py
|
Python
|
evaluation/__init__.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
evaluation/__init__.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
evaluation/__init__.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
from .evaluation import evaluation
| 34
| 34
| 0.882353
| 4
| 34
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a10d6496b80a4c774fdd41dcbb4c0a5e756986a0
| 317
|
py
|
Python
|
torch_geometric_temporal/signal/__init__.py
|
tforgaard/pytorch_geometric_temporal
|
d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8
|
[
"MIT"
] | 1,410
|
2020-06-27T03:36:19.000Z
|
2022-03-31T23:29:22.000Z
|
torch_geometric_temporal/signal/__init__.py
|
tforgaard/pytorch_geometric_temporal
|
d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8
|
[
"MIT"
] | 124
|
2020-07-07T16:11:09.000Z
|
2022-03-31T07:21:53.000Z
|
torch_geometric_temporal/signal/__init__.py
|
tforgaard/pytorch_geometric_temporal
|
d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8
|
[
"MIT"
] | 230
|
2020-07-27T11:13:52.000Z
|
2022-03-31T14:31:29.000Z
|
from .dynamic_graph_temporal_signal import *
from .dynamic_graph_temporal_signal_batch import *
from .static_graph_temporal_signal import *
from .static_graph_temporal_signal_batch import *
from .dynamic_graph_static_signal import *
from .dynamic_graph_static_signal_batch import *
from .train_test_split import *
| 28.818182
| 50
| 0.858044
| 44
| 317
| 5.659091
| 0.25
| 0.240964
| 0.257028
| 0.26506
| 0.859438
| 0.670683
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097792
| 317
| 10
| 51
| 31.7
| 0.870629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a133fa0afcdcf42b74dd45b66f95e50ddbf7734f
| 41
|
py
|
Python
|
actfw_core/v4l2/__init__.py
|
Idein/actfw-core
|
44c979bbe5d32d068eed20b7d565a6de2fb9acd3
|
[
"MIT"
] | 2
|
2021-03-15T11:44:37.000Z
|
2021-05-12T09:58:35.000Z
|
actfw_core/v4l2/__init__.py
|
Idein/actfw-core
|
44c979bbe5d32d068eed20b7d565a6de2fb9acd3
|
[
"MIT"
] | 28
|
2020-12-24T02:53:37.000Z
|
2022-03-14T09:02:28.000Z
|
actfw_core/v4l2/__init__.py
|
Idein/actfw-core
|
44c979bbe5d32d068eed20b7d565a6de2fb9acd3
|
[
"MIT"
] | null | null | null |
from . import types, video # noqa: F401
| 20.5
| 40
| 0.682927
| 6
| 41
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0.219512
| 41
| 1
| 41
| 41
| 0.78125
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a1360dd0640d6fe332d03889c6a40e96f3ddedfb
| 3,227
|
py
|
Python
|
vet_care/scripts/generate_from_history.py
|
neerajvkn/vet_care
|
14914b22e7a83265d736f9f9dc5186271ae62d66
|
[
"MIT"
] | 2
|
2020-11-23T11:14:32.000Z
|
2021-02-03T06:40:33.000Z
|
vet_care/scripts/generate_from_history.py
|
neerajvkn/vet_care
|
14914b22e7a83265d736f9f9dc5186271ae62d66
|
[
"MIT"
] | null | null | null |
vet_care/scripts/generate_from_history.py
|
neerajvkn/vet_care
|
14914b22e7a83265d736f9f9dc5186271ae62d66
|
[
"MIT"
] | 7
|
2019-11-16T14:36:33.000Z
|
2021-08-25T07:54:51.000Z
|
import csv
import datetime
import frappe
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv']"
def execute(filename):
    """Import Patient Activities from a CirrusVet history CSV.

    Expects columns 'Date' (unix timestamp), 'AnimalID' and 'Notes'.
    Rows whose AnimalID has no matching Patient are collected and printed
    at the end instead of being created.
    """
    patient_activities = []
    not_created = []
    with open(filename, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            timestamp = int(row.get('Date'))
            cirrusvet_id = row.get('AnimalID')
            description = row.get('Notes')
            date = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
            patient = _get_patient_via_cirrusvet_id(cirrusvet_id)
            if patient:
                patient_activity = _pick_or_new_patient_activity(patient_activities, patient, date)
                patient_activity.append('items', {'description': description})
                # Bug fix: only collect each activity once — the old code
                # unconditionally re-appended activities that were picked
                # from the existing list, so they got saved (and counted)
                # multiple times in the loop below.
                if not any(existing is patient_activity for existing in patient_activities):
                    patient_activities.append(patient_activity)
            else:
                not_created.append(cirrusvet_id)
    created = 0
    total = len(patient_activities)
    for patient_activity in patient_activities:
        patient_activity.save()
        created = created + 1
    # Bug fix: '${created}' printed a literal '$' (JS-template leftover).
    print(f'Created {created}/{total} patient activities')
    print(not_created)
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv', ['1010', '2920']]"
def execute_with_filter(filename, missing_animals):
    """Like execute(), but only imports rows whose AnimalID is listed.

    Args:
        filename: path to the CirrusVet history CSV.
        missing_animals: collection of AnimalID strings to (re-)import.
    """
    patient_activities = []
    not_created = []
    with open(filename, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            timestamp = int(row.get('Date'))
            cirrusvet_id = row.get('AnimalID')
            description = row.get('Notes')
            if cirrusvet_id in missing_animals:
                date = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
                patient = _get_patient_via_cirrusvet_id(cirrusvet_id)
                if patient:
                    patient_activity = _pick_or_new_patient_activity(patient_activities, patient, date)
                    patient_activity.append('items', {'description': description})
                    # Bug fix: only collect each activity once — the old code
                    # re-appended activities picked from the existing list,
                    # so they got saved (and counted) multiple times.
                    if not any(existing is patient_activity for existing in patient_activities):
                        patient_activities.append(patient_activity)
                else:
                    not_created.append(cirrusvet_id)
    created = 0
    total = len(patient_activities)
    for patient_activity in patient_activities:
        patient_activity.save()
        created = created + 1
    # Bug fix: '${created}' printed a literal '$' (JS-template leftover).
    print(f'Created {created}/{total} patient activities')
    print(not_created)
def _pick_or_new_patient_activity(patient_activities, patient, date):
def filter_activity(activity):
return activity.patient == patient and activity.posting_date == date
existing = list(filter(filter_activity, patient_activities))
if existing:
return existing[0]
return frappe.get_doc({
'doctype': 'Patient Activity',
'patient': patient,
'posting_date': date
})
def _get_patient_via_cirrusvet_id(cirrusvet_id):
    """Look up the Patient name linked to a CirrusVet id; None if no match."""
    rows = frappe.db.sql(
        """SELECT name FROM `tabPatient` WHERE vc_cirrusvet=%s""",
        cirrusvet_id,
        as_dict=True
    )
    # Parameterized query above — cirrusvet_id is never interpolated directly.
    return rows[0].get('name') if rows else None
| 37.964706
| 119
| 0.634645
| 350
| 3,227
| 5.597143
| 0.24
| 0.121491
| 0.061256
| 0.033691
| 0.748851
| 0.748851
| 0.748851
| 0.730985
| 0.730985
| 0.704441
| 0
| 0.005907
| 0.265572
| 3,227
| 84
| 120
| 38.416667
| 0.820675
| 0.067245
| 0
| 0.638889
| 1
| 0
| 0.075254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069444
| false
| 0
| 0.041667
| 0.013889
| 0.180556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a17ec4639df7fdbb530566bb66941b664210b137
| 96
|
py
|
Python
|
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | 1
|
2016-10-04T10:10:05.000Z
|
2016-10-04T10:10:05.000Z
|
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | null | null | null |
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | null | null | null |
from bhinneka.settings import BASE_URL
def get_absolute_url(path):
    """Join *path* onto the site-wide BASE_URL and return the result."""
    absolute_url = BASE_URL + path
    return absolute_url
| 16
| 38
| 0.78125
| 15
| 96
| 4.733333
| 0.733333
| 0.197183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 96
| 5
| 39
| 19.2
| 0.8875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
a19de4fc6f1c20cd12d2dfef53eca7293ca3f561
| 38
|
py
|
Python
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 9
|
2018-09-20T16:45:40.000Z
|
2021-08-08T07:04:55.000Z
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 7
|
2018-09-14T10:34:37.000Z
|
2019-04-20T06:54:29.000Z
|
scooby/plugins/processtime/__init__.py
|
zetaab/django-scooby-profiler
|
c4e63b5751a7aec2b01df3b46368c6ad40ec51e3
|
[
"MIT"
] | 3
|
2018-09-14T10:39:51.000Z
|
2019-06-26T09:32:13.000Z
|
from .plugin import ProcessTimePlugin
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a1b746b6ceeb8b3c1f65c79e0b5184f641adb774
| 58
|
py
|
Python
|
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | null | null | null |
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | 1
|
2021-10-18T09:59:45.000Z
|
2021-10-18T09:59:45.000Z
|
a.20.7.py
|
AmanMishra148/python-repo
|
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
|
[
"bzip2-1.0.6"
] | 4
|
2021-10-18T09:40:54.000Z
|
2021-10-19T14:14:28.000Z
|
def si(p,r,t):
n= (p+r+t)//3
return n
| 8.285714
| 17
| 0.344828
| 12
| 58
| 1.666667
| 0.666667
| 0.2
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.448276
| 58
| 6
| 18
| 9.666667
| 0.59375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b805e135095833b9aacb9e146ceaa3844c6781fb
| 670
|
py
|
Python
|
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | 1
|
2019-10-18T23:10:11.000Z
|
2019-10-18T23:10:11.000Z
|
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
'description': 'P4rmyKnife - The Swiss Army Knife for P4',
'author': 'Assembla, Inc.',
'url': 'https://assembla.com/'
'author_email': 'louis@assembla.com',
'version': '0.1',
'install_requires': [],
'packages': ['p4rmyknife'],
'scripts': [],
'name': 'p4rmyknife'
setup(name='p4rmyknife',
description='P4rmyKnife - The Swiss Army Knife for P4',
author='Assembla, Inc.',
url='https://assembla.com/'
author_email='louis@assembla.com',
version='0.1',
install_requires=[],
packages=['p4rmyknife'],
scripts=[]
)
| 25.769231
| 62
| 0.626866
| 73
| 670
| 5.69863
| 0.452055
| 0.105769
| 0.115385
| 0.139423
| 0.745192
| 0.745192
| 0.745192
| 0.745192
| 0.745192
| 0.745192
| 0
| 0.022346
| 0.198507
| 670
| 25
| 63
| 26.8
| 0.752328
| 0
| 0
| 0
| 0
| 0
| 0.456716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.130435
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62c3efcf40a53a46324b9e3f1578e57e7300a9cb
| 21
|
py
|
Python
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 43
|
2019-12-13T06:13:40.000Z
|
2021-07-25T06:29:17.000Z
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 2
|
2020-12-05T14:24:17.000Z
|
2020-12-24T09:47:10.000Z
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 4
|
2019-12-16T20:25:20.000Z
|
2020-06-23T08:45:17.000Z
|
from .verbo import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62e085ec76ed466edc7957012e2209ee7eb9a47a
| 131
|
py
|
Python
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 310
|
2018-11-02T10:12:33.000Z
|
2022-03-30T02:59:51.000Z
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 14
|
2018-11-08T10:09:46.000Z
|
2021-07-30T08:54:33.000Z
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 152
|
2018-11-02T13:00:49.000Z
|
2022-03-28T12:45:08.000Z
|
import const
def corpora2idx(sents, ind2idx):
return [[ind2idx[w] if w in ind2idx else const.UNK for w in s] for s in sents]
| 21.833333
| 82
| 0.709924
| 24
| 131
| 3.875
| 0.583333
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.206107
| 131
| 5
| 83
| 26.2
| 0.855769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1a03179c783f6a71443f0dfefb1dcdf8bf7a653b
| 40
|
py
|
Python
|
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
print("File dd.py sucessfully executed")
| 40
| 40
| 0.8
| 6
| 40
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
a7f7aa50e11186fe4bb67eb3b4c81147ea13ad7a
| 29
|
py
|
Python
|
app.py
|
00MB/lottocoin
|
ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a
|
[
"MIT"
] | 2
|
2021-02-10T01:40:36.000Z
|
2021-02-10T01:41:22.000Z
|
app.py
|
00MB/lottocoin
|
ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a
|
[
"MIT"
] | null | null | null |
app.py
|
00MB/lottocoin
|
ebf27f5a02169d948e8633b1dc5d5ad37ee1bb4a
|
[
"MIT"
] | null | null | null |
from lottocoin import app
| 5.8
| 25
| 0.758621
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 29
| 4
| 26
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d8aee839cc7a45416c287f7da1460240d9b1dd8
| 28
|
py
|
Python
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 22
|
2020-10-10T18:25:04.000Z
|
2021-11-09T18:56:34.000Z
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 1
|
2020-11-10T03:50:05.000Z
|
2020-11-10T03:50:05.000Z
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 2
|
2020-10-10T16:09:42.000Z
|
2021-03-10T16:43:11.000Z
|
from .inlinec import inlinec
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3dc364b351e4b86533cd7ac27b461f7ca088a0a9
| 2,126
|
py
|
Python
|
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | 1
|
2015-09-09T08:48:03.000Z
|
2015-09-09T08:48:03.000Z
|
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | 1
|
2020-04-12T19:00:12.000Z
|
2020-04-12T19:00:12.000Z
|
from django.test import TestCase
from django.test.runner import DiscoverRunner
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 3)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_unittest2(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestUnittest2"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_unittest2(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestUnittest2.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 4)
| 30.811594
| 83
| 0.676388
| 212
| 2,126
| 6.443396
| 0.165094
| 0.04612
| 0.151537
| 0.151537
| 0.871157
| 0.830161
| 0.830161
| 0.830161
| 0.807467
| 0.67716
| 0
| 0.007855
| 0.221543
| 2,126
| 68
| 84
| 31.264706
| 0.817523
| 0
| 0
| 0.479167
| 0
| 0
| 0.203669
| 0.198965
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.1875
| false
| 0
| 0.041667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3dca6b4523ea884f293c6a6b346cc8182bedf764
| 28
|
py
|
Python
|
tunga/preprocessing/__init__.py
|
tahtaciburak/tunga
|
e71a4fa393d692779ab6d674673c5674d7287dac
|
[
"MIT"
] | 5
|
2020-07-31T19:26:46.000Z
|
2020-10-23T11:49:06.000Z
|
tunga/preprocessing/__init__.py
|
tunga-ml/tunga
|
823fd762054fd513300025cbb1fc799f7e3cf6b1
|
[
"MIT"
] | null | null | null |
tunga/preprocessing/__init__.py
|
tunga-ml/tunga
|
823fd762054fd513300025cbb1fc799f7e3cf6b1
|
[
"MIT"
] | 1
|
2021-09-10T08:24:13.000Z
|
2021-09-10T08:24:13.000Z
|
from .normalization import *
| 28
| 28
| 0.821429
| 3
| 28
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9a8b2c9a4fe128befea072dd96f7b456a616ecd8
| 15,178
|
py
|
Python
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 12
|
2020-03-25T01:24:22.000Z
|
2021-09-18T06:40:16.000Z
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 1
|
2020-04-22T07:52:36.000Z
|
2020-04-22T07:52:36.000Z
|
YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 4
|
2020-03-25T01:24:26.000Z
|
2020-09-20T11:29:09.000Z
|
# coding:utf-8
import numpy as np
import tensorflow as tf
from layers import *
from MobilenetV2 import MobilenetV2,MobilenetV2_dynamic
class YOLOV3(object):
def __init__(self, training,numcls=20):
self.__training = training
self.__num_classes = numcls
self.__strides=[8,16,32]
def build_nework(self, input_data, val_reuse=False,gt_per_grid=3):
"""
:param input_data: shape为(batch_size, input_size, input_size, 3)
:return: conv_sbbox, conv_mbbox, conv_lbbox, pred_sbbox, pred_mbbox, pred_lbbox
conv_sbbox的shape为(batch_size, input_size / 8, input_size / 8, gt_per_grid * (5 + num_classes))
conv_mbbox的shape为(batch_size, input_size / 16, input_size / 16, gt_per_grid * (5 + num_classes))
conv_lbbox的shape为(batch_size, input_size / 32, input_size / 32, gt_per_grid * (5 + num_classes))
conv_?是YOLO的原始卷积输出(raw_dx, raw_dy, raw_dw, raw_dh, raw_conf, raw_prob)
pred_sbbox的shape为(batch_size, input_size / 8, input_size / 8, gt_per_grid, 5 + num_classes)
pred_mbbox的shape为(batch_size, input_size / 16, input_size / 16, gt_per_grid, 5 + num_classes)
pred_lbbox的shape为(batch_size, input_size / 32, input_size / 32, gt_per_grid, 5 + num_classes)
pred_?是YOLO预测bbox的信息(x, y, w, h, conf, prob),(x, y, w, h)的大小是相对于input_size的
"""
net_name = 'YoloV3'
with tf.variable_scope(net_name, reuse=val_reuse):
feature_map_s, feature_map_m, feature_map_l = MobilenetV2(input_data, self.__training)
#jiangwei
conv = convolutional(name='conv0', input_data=feature_map_l, filters_shape=(1, 1, 1280, 512),
training=self.__training)
conv = separable_conv(name='conv1', input_data=conv, input_c=512, output_c=1024, training=self.__training)
conv = convolutional(name='conv2', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training)
conv = separable_conv(name='conv3', input_data=conv, input_c=512, output_c=1024, training=self.__training)
conv = convolutional(name='conv4', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training)
# ----------**********---------- Detection branch of large object ----------**********----------
conv_lbbox = separable_conv(name='conv5', input_data=conv, input_c=512, output_c=1024,
training=self.__training)
conv_lbbox = convolutional(name='conv6', input_data=conv_lbbox,
filters_shape=(1, 1, 1024, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_lbbox = decode(name='pred_lbbox', conv_output=conv_lbbox,
num_classes=self.__num_classes, stride=self.__strides[2])
# ----------**********---------- Detection branch of large object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv7', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
conv = upsample(name='upsample0', input_data=conv)
conv = route(name='route0', previous_output=feature_map_m, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional('conv8', input_data=conv, filters_shape=(1, 1, 96 + 256, 256),
training=self.__training)
conv = separable_conv('conv9', input_data=conv, input_c=256, output_c=512, training=self.__training)
conv = convolutional('conv10', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
conv = separable_conv('conv11', input_data=conv, input_c=256, output_c=512, training=self.__training)
conv = convolutional('conv12', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training)
# ----------**********---------- Detection branch of middle object ----------**********----------
conv_mbbox = separable_conv(name='conv13', input_data=conv, input_c=256, output_c=512,
training=self.__training)
conv_mbbox = convolutional(name='conv14', input_data=conv_mbbox,
filters_shape=(1, 1, 512, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_mbbox = decode(name='pred_mbbox', conv_output=conv_mbbox,
num_classes=self.__num_classes, stride=self.__strides[1])
# ----------**********---------- Detection branch of middle object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv15', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
conv = upsample(name='upsample1', input_data=conv)
conv = route(name='route1', previous_output=feature_map_s, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv16', input_data=conv, filters_shape=(1, 1, 32 + 128, 128),
training=self.__training)
conv = separable_conv(name='conv17', input_data=conv, input_c=128, output_c=256, training=self.__training)
conv = convolutional(name='conv18', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
conv = separable_conv(name='conv19', input_data=conv, input_c=128, output_c=256, training=self.__training)
conv = convolutional(name='conv20', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training)
# ----------**********---------- Detection branch of small object ----------**********----------
conv_sbbox = separable_conv(name='conv21', input_data=conv, input_c=128, output_c=256,
training=self.__training)
conv_sbbox = convolutional(name='conv22', input_data=conv_sbbox,
filters_shape=(1, 1, 256, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False)
pred_sbbox = decode(name='pred_sbbox', conv_output=conv_sbbox,
num_classes=self.__num_classes, stride=self.__strides[0])
# ----------**********---------- Detection branch of small object ----------**********----------
for var in tf.global_variables(net_name):
tf.add_to_collection(net_name, var)
return conv_sbbox, conv_mbbox, conv_lbbox, pred_sbbox, pred_mbbox, pred_lbbox
def build_network_dynamic(self, input_data,statedict,val_reuse=False,inputsize=544,gt_per_grid=3):
net_name = 'YoloV3'
with tf.variable_scope(net_name, reuse=val_reuse):
feature_map_s, feature_map_m, feature_map_l = MobilenetV2_dynamic(input_data, self.__training,statedict)
conv = convolutional(name='conv0', input_data=feature_map_l, filters_shape=(1, 1, 1280, 512),
training=self.__training,statedict=statedict['headslarge.conv0'])
conv = separable_conv(name='conv1', input_data=conv, input_c=512, output_c=1024, training=self.__training,statedict=statedict['headslarge.conv1'])
conv = convolutional(name='conv2', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training,statedict=statedict['headslarge.conv2'])
conv = separable_conv(name='conv3', input_data=conv, input_c=512, output_c=1024, training=self.__training,statedict=statedict['headslarge.conv3'])
conv = convolutional(name='conv4', input_data=conv, filters_shape=(1, 1, 1024, 512),
training=self.__training,statedict=statedict['headslarge.conv4'])
# ----------**********---------- Detection branch of large object ----------**********----------
conv_lbbox = separable_conv(name='conv5', input_data=conv, input_c=512, output_c=1024,
training=self.__training,statedict=statedict['detlarge.conv5'])
conv_lbbox = convolutional(name='conv6', input_data=conv_lbbox,
filters_shape=(1, 1, 1024, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detlarge.conv6'])
pred_lbbox = decode_validate(name='pred_lbbox', conv_output=conv_lbbox,
num_classes=self.__num_classes, stride=self.__strides[2], shape=inputsize // 32,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of large object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv7', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['mergelarge.conv7'])
conv = upsample_decode(name='upsample0', input_data=conv,shape1=inputsize//32,shape2=inputsize//32)
conv = route(name='route0', previous_output=feature_map_m, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional('conv8', input_data=conv, filters_shape=(1, 1, 96 + 256, 256),
training=self.__training,statedict=statedict['headsmid.conv8'])
conv = separable_conv('conv9', input_data=conv, input_c=256, output_c=512, training=self.__training,statedict=statedict['headsmid.conv9'])
conv = convolutional('conv10', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['headsmid.conv10'])
conv = separable_conv('conv11', input_data=conv, input_c=256, output_c=512, training=self.__training,statedict=statedict['headsmid.conv11'])
conv = convolutional('conv12', input_data=conv, filters_shape=(1, 1, 512, 256),
training=self.__training,statedict=statedict['headsmid.conv12'])
# ----------**********---------- Detection branch of middle object ----------**********----------
conv_mbbox = separable_conv(name='conv13', input_data=conv, input_c=256, output_c=512,
training=self.__training,statedict=statedict['detmid.conv13'])
conv_mbbox = convolutional(name='conv14', input_data=conv_mbbox,
filters_shape=(1, 1, 512, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detmid.conv14'])
pred_mbbox = decode_validate(name='pred_mbbox', conv_output=conv_mbbox,
num_classes=self.__num_classes, stride=self.__strides[1], shape=inputsize // 16,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of middle object ----------**********----------
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv15', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['mergemid.conv15'])
conv = upsample_decode(name='upsample1', input_data=conv,shape1=inputsize//16,shape2=inputsize//16)
conv = route(name='route1', previous_output=feature_map_s, current_output=conv)
# ----------**********---------- up sample and merge features map ----------**********----------
conv = convolutional(name='conv16', input_data=conv, filters_shape=(1, 1, 32 + 128, 128),
training=self.__training,statedict=statedict['headsmall.conv16'])
conv = separable_conv(name='conv17', input_data=conv, input_c=128, output_c=256, training=self.__training,statedict=statedict['headsmall.conv17'])
conv = convolutional(name='conv18', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['headsmall.conv18'])
conv = separable_conv(name='conv19', input_data=conv, input_c=128, output_c=256, training=self.__training,statedict=statedict['headsmall.conv19'])
conv = convolutional(name='conv20', input_data=conv, filters_shape=(1, 1, 256, 128),
training=self.__training,statedict=statedict['headsmall.conv20'])
# ----------**********---------- Detection branch of small object ----------**********----------
conv_sbbox = separable_conv(name='conv21', input_data=conv, input_c=128, output_c=256,
training=self.__training,statedict=statedict['detsmall.conv21'])
conv_sbbox = convolutional(name='conv22', input_data=conv_sbbox,
filters_shape=(1, 1, 256, gt_per_grid * (self.__num_classes + 5)),
training=self.__training, downsample=False, activate=False, bn=False,statedict=statedict['detsmall.conv22'])
pred_sbbox = decode_validate(name='pred_sbbox', conv_output=conv_sbbox,
num_classes=self.__num_classes, stride=self.__strides[0], shape=inputsize // 8,
gt_pergrid=gt_per_grid)
# ----------**********---------- Detection branch of small object ----------**********----------
pred_sbbox = tf.reshape(pred_sbbox, (-1, 5 + self.__num_classes))
pred_mbbox = tf.reshape(pred_mbbox, (-1, 5 + self.__num_classes))
pred_lbbox = tf.reshape(pred_lbbox, (-1, 5 + self.__num_classes))
pred_bbox = tf.concat([pred_sbbox, pred_mbbox, pred_lbbox], 0, name='output/boxconcat')
for var in tf.global_variables(net_name):
tf.add_to_collection(net_name, var)
return pred_bbox
| 80.306878
| 158
| 0.566346
| 1,640
| 15,178
| 4.928659
| 0.102439
| 0.06124
| 0.077199
| 0.048497
| 0.866263
| 0.849066
| 0.832117
| 0.819127
| 0.810343
| 0.810343
| 0
| 0.047602
| 0.258137
| 15,178
| 188
| 159
| 80.734043
| 0.670249
| 0.183555
| 0
| 0.529412
| 0
| 0
| 0.061263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022059
| false
| 0
| 0.029412
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ad73e40610067893659f1466d9493e1d1fdb576
| 49
|
py
|
Python
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 59
|
2015-08-29T10:51:34.000Z
|
2021-11-03T10:00:25.000Z
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 162
|
2018-02-16T05:13:03.000Z
|
2021-05-14T02:47:37.000Z
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 22
|
2015-08-10T10:46:18.000Z
|
2020-04-04T07:11:55.000Z
|
from oscar.apps.checkout.models import * # noqa
| 24.5
| 48
| 0.755102
| 7
| 49
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 1
| 49
| 49
| 0.880952
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9af29a94a64ce15c2f18ac01d5658596e67aa248
| 48
|
py
|
Python
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | 2
|
2020-05-01T11:17:06.000Z
|
2020-11-23T10:37:24.000Z
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | 69
|
2020-03-26T15:39:26.000Z
|
2022-01-14T14:34:39.000Z
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | null | null | null |
from .common import *
from .json_store import *
| 16
| 25
| 0.75
| 7
| 48
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 26
| 24
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b118f2f3e6c0e9617cb2cf673e9a7f3e68d6f9ce
| 53
|
py
|
Python
|
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | 3
|
2022-02-22T12:50:08.000Z
|
2022-03-13T03:38:46.000Z
|
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
from basicts.archs.DGCRN_arch.DGCRN_arch import DGCRN
| 53
| 53
| 0.886792
| 9
| 53
| 5
| 0.666667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 53
| 1
| 53
| 53
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b1716479f1c26f49cf955c116938436d2e898588
| 21
|
py
|
Python
|
fastagram/tags/models/__init__.py
|
dobestan/fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 1
|
2016-03-27T10:36:01.000Z
|
2016-03-27T10:36:01.000Z
|
fastagram/tags/models/__init__.py
|
dobestan/django-101-fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 3
|
2016-03-25T05:32:39.000Z
|
2016-03-28T04:59:17.000Z
|
fastagram/tags/models/__init__.py
|
dobestan/django-101-fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 1
|
2016-03-28T16:35:36.000Z
|
2016-03-28T16:35:36.000Z
|
from .tag import Tag
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b179ff426e1a26e74d3b6cc6592435b4bf9294c3
| 224
|
py
|
Python
|
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from face_api.models import KnowledgeDatabase
from face_api.models import ImageUploads
# Register your models here.
admin.site.register(KnowledgeDatabase)
admin.site.register(ImageUploads)
| 24.888889
| 45
| 0.848214
| 29
| 224
| 6.482759
| 0.482759
| 0.085106
| 0.117021
| 0.180851
| 0.244681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 224
| 8
| 46
| 28
| 0.926108
| 0.116071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b189f5ce6dc38c0cbcc1102caf8a791a932e5870
| 12,747
|
py
|
Python
|
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-28T01:48:58.000Z
|
2020-05-28T01:48:58.000Z
|
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=not-context-manager
from unittest.mock import ANY, Mock
from starlette.testclient import TestClient
from ariadne.asgi import (
GQL_CONNECTION_ACK,
GQL_CONNECTION_INIT,
GQL_DATA,
GQL_ERROR,
GQL_START,
GraphQL,
)
from ariadne.types import Extension
def test_custom_context_value_is_passed_to_resolvers(schema):
app = GraphQL(schema, context_value={"test": "TEST-CONTEXT"})
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-CONTEXT"}}
def test_custom_context_value_function_is_set_and_called_by_app(schema):
get_context_value = Mock(return_value=True)
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_context_value.assert_called_once()
def test_custom_context_value_function_result_is_passed_to_resolvers(schema):
get_context_value = Mock(return_value={"test": "TEST-CONTEXT"})
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-CONTEXT"}}
def test_async_context_value_function_result_is_awaited_before_passing_to_resolvers(
schema,
):
async def get_context_value(*_):
return {"test": "TEST-ASYNC-CONTEXT"}
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-ASYNC-CONTEXT"}}
def test_custom_root_value_is_passed_to_query_resolvers(schema):
    """root_value given as a dict is visible to query resolvers."""
    http = TestClient(GraphQL(schema, root_value={"test": "TEST-ROOT"}))
    result = http.post("/", json={"query": "{ testRoot }"}).json()
    assert result == {"data": {"testRoot": "TEST-ROOT"}}
def test_custom_root_value_is_passed_to_subscription_resolvers(schema):
    # root_value must reach subscription resolvers over the graphql-ws
    # websocket protocol, not only HTTP query resolvers.
    app = GraphQL(schema, root_value={"test": "TEST-ROOT"})
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        # graphql-ws handshake: CONNECTION_INIT, then START with a query id.
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { testRoot }"},
            }
        )
        # Protocol order: server ACKs the connection before streaming data.
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        assert response["payload"] == {"data": {"testRoot": "TEST-ROOT"}}
def test_custom_root_value_function_is_called_by_query(schema):
    """A callable root_value is invoked once for a query request."""
    get_root_value = Mock(return_value=True)
    http = TestClient(GraphQL(schema, root_value=get_root_value))
    http.post("/", json={"query": "{ status }"})
    get_root_value.assert_called_once()
def test_custom_root_value_function_is_called_by_subscription(schema):
    # The root_value callable must also be invoked for websocket
    # subscriptions, not only for plain HTTP queries.
    get_root_value = Mock(return_value=True)
    app = GraphQL(schema, root_value=get_root_value)
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { ping }"},
            }
        )
        # Consume ACK and first DATA frame before asserting on the mock,
        # so the subscription has actually started resolving.
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        get_root_value.assert_called_once()
def test_custom_root_value_function_is_called_with_context_value(schema):
    """The root_value callable receives the context value as first argument."""
    get_root_value = Mock(return_value=True)
    app = GraphQL(
        schema, context_value={"test": "TEST-CONTEXT"}, root_value=get_root_value
    )
    TestClient(app).post("/", json={"query": "{ status }"})
    get_root_value.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY)
def test_custom_validation_rule_is_called_by_query_validation(schema, validation_rule):
    """A rule in the validation_rules list runs during query validation."""
    http = TestClient(GraphQL(schema, validation_rules=[validation_rule]))
    http.post("/", json={"query": "{ status }"})
    validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_set_and_called_on_query_execution(
    schema, validation_rule
):
    """A callable validation_rules is invoked and the rules it returns run."""
    get_validation_rules = Mock(return_value=[validation_rule])
    http = TestClient(GraphQL(schema, validation_rules=get_validation_rules))
    http.post("/", json={"query": "{ status }"})
    get_validation_rules.assert_called_once()
    validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_called_with_context_value(
    schema, validation_rule
):
    """The validation_rules callable gets the context value as first argument."""
    get_validation_rules = Mock(return_value=[validation_rule])
    app = GraphQL(
        schema,
        context_value={"test": "TEST-CONTEXT"},
        validation_rules=get_validation_rules,
    )
    TestClient(app).post("/", json={"query": "{ status }"})
    get_validation_rules.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY, ANY)
def execute_failing_query(app):
    """Fire the known-to-fail ``{ error }`` query, ignoring the response."""
    TestClient(app).post("/", json={"query": "{ error }"})
def test_default_logger_is_used_to_log_error_if_custom_is_not_set(schema, mocker):
    """With no explicit logger, errors are logged via the "ariadne" logger."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    execute_failing_query(GraphQL(schema))
    logging_mock.getLogger.assert_called_once_with("ariadne")
def test_custom_logger_is_used_to_log_query_error(schema, mocker):
    """Setting logger="custom" routes query errors to that named logger."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    execute_failing_query(GraphQL(schema, logger="custom"))
    logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_source_error(schema, mocker):
    # Errors raised by a subscription *source* must be logged through the
    # configured custom logger as well.
    logging_mock = mocker.patch("ariadne.logger.logging")
    app = GraphQL(schema, logger="custom")
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { sourceError }"},
            }
        )
        # Drain ACK and the DATA frame so the error has been produced
        # (and logged) before asserting on the logging mock.
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_resolver_error(schema, mocker):
    # Same as the source-error test above, but the error originates in the
    # subscription *resolver*; it must still hit the custom logger.
    logging_mock = mocker.patch("ariadne.logger.logging")
    app = GraphQL(schema, logger="custom")
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { resolverError }"},
            }
        )
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_error_formatter_is_used_to_format_query_error(schema):
    """A custom error_formatter is used for errors raised by queries."""
    error_formatter = Mock(return_value=True)
    execute_failing_query(GraphQL(schema, error_formatter=error_formatter))
    error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_syntax_error(schema):
    # A deliberately malformed subscription document ("subscription {")
    # must yield a GQL_ERROR frame formatted by the custom formatter.
    error_formatter = Mock(return_value=True)
    app = GraphQL(schema, error_formatter=error_formatter)
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {"type": GQL_START, "id": "test1", "payload": {"query": "subscription {"}}
        )
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        # Syntax errors come back as GQL_ERROR (not GQL_DATA), tagged with
        # the id of the operation that failed.
        response = ws.receive_json()
        assert response["type"] == GQL_ERROR
        assert response["id"] == "test1"
        error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_source_error(schema):
    # Errors raised in a subscription source are delivered inside a
    # GQL_DATA frame and must pass through the custom error formatter.
    error_formatter = Mock(return_value=True)
    app = GraphQL(schema, error_formatter=error_formatter)
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { sourceError }"},
            }
        )
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        assert response["id"] == "test1"
        error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_resolver_error(schema):
    # Same as the source-error test above, but the failure happens in the
    # subscription resolver; it is still formatted by the custom formatter.
    error_formatter = Mock(return_value=True)
    app = GraphQL(schema, error_formatter=error_formatter)
    client = TestClient(app)
    with client.websocket_connect("/", "graphql-ws") as ws:
        ws.send_json({"type": GQL_CONNECTION_INIT})
        ws.send_json(
            {
                "type": GQL_START,
                "id": "test1",
                "payload": {"query": "subscription { resolverError }"},
            }
        )
        response = ws.receive_json()
        assert response["type"] == GQL_CONNECTION_ACK
        response = ws.receive_json()
        assert response["type"] == GQL_DATA
        assert response["id"] == "test1"
        error_formatter.assert_called_once()
def test_error_formatter_is_called_with_debug_enabled(schema):
    """The formatter receives debug=True when the app runs in debug mode."""
    error_formatter = Mock(return_value=True)
    app = GraphQL(schema, debug=True, error_formatter=error_formatter)
    execute_failing_query(app)
    error_formatter.assert_called_once_with(ANY, True)
def test_error_formatter_is_called_with_debug_disabled(schema):
    """The formatter receives debug=False when debug mode is off."""
    error_formatter = Mock(return_value=True)
    app = GraphQL(schema, debug=False, error_formatter=error_formatter)
    execute_failing_query(app)
    error_formatter.assert_called_once_with(ANY, False)
class CustomExtension(Extension):
    """Test extension that lowercases every resolved value."""

    async def resolve(self, next_, parent, info, **kwargs):
        resolved = next_(parent, info, **kwargs)
        return resolved.lower()
def test_extension_from_option_are_passed_to_query_executor(schema):
    """Extensions listed in the extensions option take part in execution."""
    http = TestClient(GraphQL(schema, extensions=[CustomExtension]))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def test_extensions_function_result_is_passed_to_query_executor(schema):
    """A callable extensions option supplies the extension list per request."""

    def get_extensions(*_):
        return [CustomExtension]

    http = TestClient(GraphQL(schema, extensions=get_extensions))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def test_async_extensions_function_result_is_passed_to_query_executor(schema):
    """An async extensions callable is awaited and its result applied."""

    async def get_extensions(*_):
        return [CustomExtension]

    http = TestClient(GraphQL(schema, extensions=get_extensions))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def middleware(next_fn, *args, **kwargs):
    """Resolve via ``next_fn`` and wrap the value in literal ``**`` markers."""
    return "**{}**".format(next_fn(*args, **kwargs))
def test_middlewares_are_passed_to_query_executor(schema):
    """Middleware from the middleware option wraps resolver results."""
    http = TestClient(GraphQL(schema, middleware=[middleware]))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
def test_middleware_function_result_is_passed_to_query_executor(schema):
    """A callable middleware option supplies the middleware list."""

    def get_middleware(*_):
        return [middleware]

    http = TestClient(GraphQL(schema, middleware=get_middleware))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
def test_async_middleware_function_result_is_passed_to_query_executor(schema):
    """An async middleware callable is awaited and its result applied."""

    async def get_middleware(*_):
        return [middleware]

    http = TestClient(GraphQL(schema, middleware=get_middleware))
    result = http.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
| 36.524355
| 87
| 0.672394
| 1,500
| 12,747
| 5.372
| 0.079333
| 0.052122
| 0.055597
| 0.040084
| 0.889551
| 0.871432
| 0.865724
| 0.840035
| 0.797717
| 0.764954
| 0
| 0.000974
| 0.194556
| 12,747
| 348
| 88
| 36.62931
| 0.783871
| 0.002746
| 0
| 0.633094
| 0
| 0
| 0.112746
| 0.006924
| 0
| 0
| 0
| 0
| 0.165468
| 1
| 0.115108
| false
| 0.039568
| 0.014388
| 0.007194
| 0.158273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b18b42a0184f3b3519a30ad5c379fbaef6c9cbc7
| 14,426
|
py
|
Python
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 14
|
2019-11-16T18:51:51.000Z
|
2022-01-15T17:50:34.000Z
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 148
|
2018-12-10T09:07:45.000Z
|
2022-03-08T10:51:04.000Z
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 1
|
2020-08-05T14:25:48.000Z
|
2020-08-05T14:25:48.000Z
|
from aiohttp.test_utils import TestClient
import pytest
import typing
import unittest.mock
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.model.character import MINIMUM_BEFORE_EXHAUSTED
from rolling.server.document.affinity import AffinityDirectionType
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.affinity import CHIEF_STATUS
from rolling.server.document.affinity import MEMBER_STATUS
from rolling.server.document.build import BuildDocument
from rolling.server.document.build import DOOR_MODE_LABELS
from rolling.server.document.build import DOOR_MODE__CLOSED
from rolling.server.document.build import DOOR_MODE__CLOSED_EXCEPT_FOR
from rolling.server.document.build import DoorDocument
@pytest.fixture
def websocket_prepare_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
    """Patch WebSocketResponse.prepare for the duration of a test."""
    patcher = unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.prepare")
    with patcher as mock_:
        yield mock_
@pytest.fixture
def zone_event_manager_listen_mock() -> typing.Generator[
    unittest.mock.AsyncMock, None, None
]:
    """Patch ZoneEventsManager._listen so no real event loop is started."""
    patcher = unittest.mock.patch(
        "rolling.server.zone.websocket.ZoneEventsManager._listen"
    )
    with patcher as mock_:
        yield mock_
@pytest.fixture
def zone_event_manager_close_mock() -> typing.Generator[
    unittest.mock.AsyncMock, None, None
]:
    """Patch ZoneEventsManager.close_websocket so sockets are never closed."""
    patcher = unittest.mock.patch(
        "rolling.server.zone.websocket.ZoneEventsManager.close_websocket"
    )
    with patcher as mock_:
        yield mock_
@pytest.fixture
def socket_send_str_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
    """Patch WebSocketResponse.send_str and expose the mock for assertions."""
    patcher = unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.send_str")
    with patcher as mock_:
        yield mock_
class TestDoor:
    """Tests for door access rules (who may pass a placed door build).

    Rules are authored per-character via ``kernel.door_lib.update`` and
    checked with ``kernel.door_lib.is_access_locked_for``.  Several tests
    then mutate world state (movement, death, exhaustion) to verify the
    active rule follows the author's availability.
    """

    def _place_door(self, kernel: Kernel) -> DoorDocument:
        # Place a finished DOOR build at a fixed world/zone position.
        build = kernel.build_lib.place_build(
            world_row_i=1,
            world_col_i=1,
            zone_row_i=10,
            zone_col_i=10,
            build_id="DOOR",
            under_construction=False,
        )
        return build

    def _create_rule(
        self,
        kernel: Kernel,
        author: CharacterModel,
        door: BuildDocument,
        mode: str,
        affinity_ids: typing.Optional[typing.List[int]],
    ) -> None:
        # Register a door rule on behalf of ``author`` for ``door``.
        kernel.door_lib.update(
            character_id=author.id,
            build_id=door.id,
            new_mode=mode,
            new_affinity_ids=affinity_ids,
        )

    def test_one_rule_lock__author_here__stranger_cant(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A CLOSED rule lets its author through but locks out a stranger."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    def test_one_rule_lock_except__author_here__stranger_cant_but_member_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_franck_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """CLOSED_EXCEPT_FOR admits affinity members alongside the author."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        franck = worldmapc_franck_model

        # Given: xena (chief) and franck (member) share an affinity;
        # arthur belongs to no affinity.
        aff = kernel.affinity_lib.create(
            name="aff1",
            join_type=AffinityJoinType.ACCEPT_ALL,
            direction_type=AffinityDirectionType.ONE_DIRECTOR,
        )
        kernel.affinity_lib.join(
            character_id=xena.id,
            affinity_id=aff.id,
            accepted=True,
            request=False,
            status_id=CHIEF_STATUS[0],
        )
        kernel.affinity_lib.join(
            character_id=franck.id,
            affinity_id=aff.id,
            accepted=True,
            request=False,
            status_id=MEMBER_STATUS[0],
        )
        door = self._place_door(kernel)
        self._create_rule(
            kernel,
            author=xena,
            door=door,
            mode=DOOR_MODE__CLOSED_EXCEPT_FOR,
            affinity_ids=[aff.id],
        )

        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=franck.id
        )

    def test_two_rule_lock__author_here_and_first_can__stranger_second_cant(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """With two rules on one door, the first author's rule wins."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given: both characters declare a CLOSED rule; xena's is first.
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        self._create_rule(
            kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    async def test_two_rule_lock__author_first_travel__stranger_second_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """When the first author leaves the zone, the second rule takes over."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        self._create_rule(
            kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When/Then 1: xena's rule is active while she is present.
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: xena travels away from the door's zone.
        await kernel.character_lib.move(
            character=xena,
            to_world_row=2,
            to_world_col=2,
        )

        # When/Then 2: arthur's own rule now applies, so he may pass.
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: xena comes back to the door's zone.
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )

        # When/Then 3: arthur's rule stays active; xena is now locked out.
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    async def test_one_rule_lock__author_first_travel__stranger_second_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A single rule deactivates while its author is away and resumes on return."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: xena leaves the zone.
        await kernel.character_lib.move(
            character=xena,
            to_world_row=2,
            to_world_col=2,
        )

        # When/Then 2: nobody enforces the rule, arthur may pass.
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: xena returns.
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )

        # When/Then 3: her rule (sole rule) is enforced again.
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    async def test_one_rule_lock__author_dead__stranger_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A dead author's rule stops locking the door."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: the rule's author dies.
        kernel.character_lib.kill(character_id=xena.id)

        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    async def test_one_rule_lock__author_vulnerable__stranger_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A vulnerable (exhausted) author cannot enforce the door rule."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model

        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )

        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

        # Given 2: push xena past the exhaustion threshold so she becomes
        # vulnerable, persisting the change through the server DB session.
        xena_doc = kernel.character_lib.get_document(xena.id)
        xena_doc.tiredness = MINIMUM_BEFORE_EXHAUSTED + 1
        kernel.server_db_session.add(xena_doc)
        kernel.server_db_session.commit()
        xena = kernel.character_lib.get(id_=xena.id)
        assert xena.vulnerable

        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )

    @pytest.mark.usefixtures("websocket_prepare_mock")
    @pytest.mark.usefixtures("zone_event_manager_listen_mock")
    @pytest.mark.usefixtures("zone_event_manager_close_mock")
    async def test_events_when_door_author_left_when_back_in_zone(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
        socket_send_str_mock: unittest.mock.AsyncMock,
    ) -> None:
        """Zone websocket clients get door walkability updates on author moves."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        request_mock = unittest.mock.AsyncMock()

        # Given: a door rule by xena, and arthur listening on the zone socket.
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        _ = await kernel.server_zone_events_manager.get_new_socket(
            request=request_mock,
            row_i=1,
            col_i=1,
            character_id=arthur.id,
        )

        # When: xena leaves the zone.
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=2,
        )

        # Then: arthur receives a NEW_BUILD event marking the door walkable.
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":true}' in event_str for event_str in events_str_list])

        # When: xena comes back.
        socket_send_str_mock.reset_mock()
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )

        # Then: the door is announced as not walkable again.
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])

    @pytest.mark.usefixtures("websocket_prepare_mock")
    @pytest.mark.usefixtures("zone_event_manager_listen_mock")
    @pytest.mark.usefixtures("zone_event_manager_close_mock")
    async def test_events_when_door_author_update_rule(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
        socket_send_str_mock: unittest.mock.AsyncMock,
        worldmapc_web_app: TestClient,
    ) -> None:
        """Updating a door rule over HTTP pushes a NEW_BUILD event to the zone."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        request_mock = unittest.mock.AsyncMock()
        web = worldmapc_web_app

        # Given: a door with no rule yet; arthur listens on the zone socket.
        door = self._place_door(kernel)
        _ = await kernel.server_zone_events_manager.get_new_socket(
            request=request_mock,
            row_i=1,
            col_i=1,
            character_id=arthur.id,
        )

        # When: xena sets the CLOSED mode through the HTTP API.
        response = await web.post(
            f"/character/{xena.id}/door/{door.id}?mode={DOOR_MODE_LABELS[DOOR_MODE__CLOSED]}"
        )
        assert response.status == 200

        # Then: arthur is told the door is no longer walkable.
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
| 32.272931
| 93
| 0.643768
| 1,746
| 14,426
| 4.947881
| 0.087629
| 0.036926
| 0.031832
| 0.036115
| 0.835398
| 0.821391
| 0.778447
| 0.774858
| 0.769765
| 0.75819
| 0
| 0.00491
| 0.28005
| 14,426
| 446
| 94
| 32.345291
| 0.826882
| 0.018439
| 0
| 0.633053
| 0
| 0
| 0.037097
| 0.03108
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.02521
| false
| 0
| 0.044818
| 0
| 0.07563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8e0455d33253902aeabce67886870561b85812f
| 2,685
|
py
|
Python
|
quantumcat/gates/custom_gates/cirq/__init__.py
|
Artificial-Brain/quantumcat
|
eff99cac7674b3a1b7e1f752e7ebed2b960f85b3
|
[
"Apache-2.0"
] | 20
|
2021-05-10T07:04:41.000Z
|
2021-12-13T17:12:05.000Z
|
quantumcat/gates/custom_gates/cirq/__init__.py
|
Artificial-Brain/quantumcat
|
eff99cac7674b3a1b7e1f752e7ebed2b960f85b3
|
[
"Apache-2.0"
] | 2
|
2021-04-26T05:34:52.000Z
|
2021-05-16T13:46:22.000Z
|
quantumcat/gates/custom_gates/cirq/__init__.py
|
Artificial-Brain/quantumcat
|
eff99cac7674b3a1b7e1f752e7ebed2b960f85b3
|
[
"Apache-2.0"
] | 17
|
2021-04-02T18:09:33.000Z
|
2022-02-10T16:38:57.000Z
|
# (C) Copyright Artificial Brain 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantumcat.gates.custom_gates.cirq.u_gate import UGate
from quantumcat.gates.custom_gates.cirq.u1_gate import U1Gate
from quantumcat.gates.custom_gates.cirq.u2_gate import U2Gate
from quantumcat.gates.custom_gates.cirq.u3_gate import U3Gate
from quantumcat.gates.custom_gates.cirq.sdg_gate import SDGGate
from quantumcat.gates.custom_gates.cirq.sxd_gate import SXDGate
from quantumcat.gates.custom_gates.cirq.td_gate import TDGate
from quantumcat.gates.custom_gates.cirq.rxx_gate import RXXGate
from quantumcat.gates.custom_gates.cirq.r_gate import RGate
from quantumcat.gates.custom_gates.cirq.rx_gate import RXGate
from quantumcat.gates.custom_gates.cirq.ry_gate import RYGate
from quantumcat.gates.custom_gates.cirq.ryy_gate import RYYGate
from quantumcat.gates.custom_gates.cirq.rz_gate import RZGate
from quantumcat.gates.custom_gates.cirq.rccx_gate import RCCXGate
from quantumcat.gates.custom_gates.cirq.rc3x_gate import RC3XGate
from quantumcat.gates.custom_gates.cirq.rzz_gate import RZZGate
from quantumcat.gates.custom_gates.cirq.rzx_gate import RZXGate
from quantumcat.gates.custom_gates.cirq.sx_gate import SXGate
from quantumcat.gates.custom_gates.cirq.cy_gate import CYGate
from quantumcat.gates.custom_gates.cirq.p_gate import PGate
from quantumcat.gates.custom_gates.cirq.cu_gate import CUGate
from quantumcat.gates.custom_gates.cirq.cu1_gate import CU1Gate
from quantumcat.gates.custom_gates.cirq.cu3_gate import CU3Gate
from quantumcat.gates.custom_gates.cirq.crx_gate import CRXGate
from quantumcat.gates.custom_gates.cirq.cry_gate import CRYGate
from quantumcat.gates.custom_gates.cirq.crz_gate import CRZGate
from quantumcat.gates.custom_gates.cirq.dcx_gate import DCXGate
from quantumcat.gates.custom_gates.cirq.c3x_gate import C3XGate
from quantumcat.gates.custom_gates.cirq.c4x_gate import C4XGate
from quantumcat.gates.custom_gates.cirq.c3sx_gate import C3SXGate
from quantumcat.gates.custom_gates.cirq.cphase_gate import CPhaseGate
from quantumcat.gates.custom_gates.cirq.csx_gate import CSXGate
from quantumcat.gates.custom_gates.cirq.ch_gate import CHGate
| 55.9375
| 75
| 0.84581
| 421
| 2,685
| 5.23753
| 0.325416
| 0.209524
| 0.284354
| 0.37415
| 0.508844
| 0.508844
| 0
| 0
| 0
| 0
| 0
| 0.010643
| 0.09013
| 2,685
| 47
| 76
| 57.12766
| 0.891936
| 0.211173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
770aad7e1ff56e67c95983849d2bf6bbbc1649fe
| 284
|
py
|
Python
|
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
################################################################################
# Python package __init__.py file.
#
# Author: Carl Cortright
# Date: 12/20/2016
#
################################################################################
from slackwebhook import slackwebhook
| 28.4
| 80
| 0.323944
| 16
| 284
| 5.5
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030418
| 0.073944
| 284
| 9
| 81
| 31.555556
| 0.304183
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
770c52f41e079a4cb403bba6dcadc3852fc8a850
| 231
|
py
|
Python
|
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | null | null | null |
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | null | null | null |
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | 1
|
2021-08-09T15:28:49.000Z
|
2021-08-09T15:28:49.000Z
|
from job_scheduler.cache.base import ScheduleCache
from job_scheduler.cache.fake import FakeScheduleCache
from job_scheduler.cache.redis import RedisScheduleCache
# Public API of the cache package. The original line assigned to ``all``,
# which shadows the builtin and has no import-system meaning; ``__all__``
# is the name honored by ``from job_scheduler.cache import *``.
__all__ = ["ScheduleCache", "RedisScheduleCache", "FakeScheduleCache"]
| 38.5
| 66
| 0.848485
| 25
| 231
| 7.72
| 0.48
| 0.108808
| 0.248705
| 0.326425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 231
| 5
| 67
| 46.2
| 0.906103
| 0
| 0
| 0
| 0
| 0
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7722bc9189fc79c029275036a7e49a54482e4d8c
| 38
|
py
|
Python
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 13
|
2021-12-02T09:28:47.000Z
|
2022-01-14T18:39:51.000Z
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 190
|
2021-11-19T15:37:44.000Z
|
2022-01-17T00:23:13.000Z
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 4
|
2021-11-22T18:21:53.000Z
|
2021-12-22T13:55:42.000Z
|
# TODO: automate finding the best agents
| 19
| 37
| 0.789474
| 5
| 38
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 0.9375
| 0.921053
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
773a351110e170920b1633be885fbe44c1c4b850
| 4,127
|
py
|
Python
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 8
|
2018-09-19T00:42:45.000Z
|
2022-03-25T12:22:01.000Z
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 4
|
2020-06-05T21:44:14.000Z
|
2021-12-06T17:24:31.000Z
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 3
|
2020-07-10T18:15:01.000Z
|
2020-12-16T09:50:02.000Z
|
#!/usr/bin/env python
"""Using unsat cores to give hints."""
from SudokuLib import Puzzle
from Solver import Solver
from yices.Yices import Yices
from yices.Census import Census
puzzle_blank = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
puzzle_1 = [
[ 0, 6, 0, 0, 0, 8, 0, 7, 3],
[ 0, 0, 2, 0, 0, 0, 0, 4, 0],
[ 5, 0, 0, 0, 6, 0, 0, 0, 0],
#
[ 0, 0, 0, 6, 0, 2, 0, 0, 5],
[ 0, 0, 4, 0, 0, 0, 1, 0, 0],
[ 6, 0, 0, 8, 0, 7, 0, 0, 0],
#
[ 0, 0, 0, 0, 7, 0, 0, 0, 1],
[ 0, 5, 0, 0, 0, 0, 3, 0, 0],
[ 4, 3, 0, 1, 0, 0, 0, 8, 0],
]
# puzzle_2 come from here:
# https://puzzling.stackexchange.com/questions/29/what-are-the-criteria-for-determining-the-difficulty-of-sudoku-puzzle
# where it is claimed to be the "hardest sudoku in the world"
# but in fact is not a valid sudoku since it has more than one solution. tut tut.
# I added it to one of the predefined boards ('escargot') of SudokuSensei and
# it has 29 non isomorphic models (aka solutions).
puzzle_ai_escargot = [
[ 1, 0, 0, 0, 0, 7, 0, 9, 0],
[ 0, 3, 0, 0, 2, 0, 0, 0, 8],
[ 0, 0, 9, 6, 0, 0, 5, 0, 0],
#
[ 0, 0, 5, 3, 0, 0, 9, 0, 0],
[ 0, 1, 0, 0, 8, 0, 0, 0, 2],
[ 6, 0, 0, 0, 0, 4, 0, 0, 0],
#
[ 3, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 4, 0, 0, 0, 0, 0, 0, 7],
[ 0, 0, 7, 0, 0, 0, 0, 3, 0],
]
extreme_1 = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 2, 0, 0, 7, 1, 5, 0],
[ 4, 0, 0, 0, 0, 9, 3, 0, 6],
#
[ 0, 1, 0, 0, 0, 3, 0, 0, 5],
[ 0, 0, 0, 5, 2, 4, 0, 0, 0],
[ 3, 0, 0, 7, 0, 0, 0, 6, 0],
#
[ 1, 0, 7, 6, 0, 0, 0, 0, 9],
[ 0, 5, 6, 8, 0, 0, 4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
extreme_2 = [
[ 0, 0, 0, 0, 0, 0, 7, 0, 3],
[ 0, 0, 6, 0, 0, 8, 5, 4, 0],
[ 5, 0, 0, 0, 7, 0, 0, 0, 0],
#
[ 0, 1, 9, 0, 0, 4, 8, 0, 0],
[ 7, 0, 0, 0, 0, 0, 0, 0, 9],
[ 0, 0, 8, 9, 0, 0, 2, 1, 0],
#
[ 0, 0, 0, 0, 5, 0, 0, 0, 2],
[ 0, 5, 7, 3, 0, 0, 1, 0, 0],
[ 4, 0, 3, 0, 0, 0, 0, 0, 0],
]
extreme_3 = [
[ 8, 0, 1, 0, 9, 0, 0, 0, 0],
[ 0, 7, 2, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 3, 0, 0, 8, 0, 0],
#
[ 5, 0, 0, 1, 0, 0, 0, 4, 0],
[ 1, 0, 0, 0, 3, 0, 0, 0, 9],
[ 0, 2, 0, 0, 0, 7, 0, 0, 5],
#
[ 0, 0, 5, 0, 0, 2, 0, 0, 0],
[ 0, 0, 0, 4, 0, 0, 5, 9, 0],
[ 0, 0, 0, 0, 8, 0, 4, 0, 3],
]
extreme_4 = [
[ 7, 0, 0, 0, 0, 4, 0, 5, 0],
[ 0, 0, 0, 5, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0, 6, 0, 7, 8],
#
[ 0, 0, 4, 0, 0, 0, 8, 0, 0],
[ 3, 5, 0, 0, 8, 0, 0, 1, 9],
[ 0, 0, 8, 0, 0, 0, 2, 0, 0],
#
[ 5, 4, 0, 1, 0, 0, 0, 0, 0],
[ 0, 0, 6, 0, 0, 5, 0, 0, 0],
[ 0, 8, 0, 9, 0, 0, 0, 0, 1],
]
#https://www.conceptispuzzles.com/index.aspx?uri=info/article/424
# The board from the conceptispuzzles article linked above (0 = blank cell).
hardest = [
[ 8, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 3, 6, 0, 0, 0, 0, 0],
[ 0, 7, 0, 0, 9, 0, 2, 0, 0],
#
[ 0, 5, 0, 0, 0, 7, 0, 0, 0],
[ 0, 0, 0, 0, 4, 5, 7, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 3, 0],
#
[ 0, 0, 1, 0, 0, 0, 0, 6, 8],
[ 0, 0, 8, 5, 0, 0, 0, 1, 0],
[ 0, 9, 0, 0, 0, 0, 4, 0, 0],
]
def analyze(rawpuzzle, name):
    """Print the puzzle, solve it, and — if solvable — print the solution
    and the experimental core/hint information."""
    board = Puzzle(rawpuzzle)
    print(f'\nPuzzle ({name}):\n')
    board.pprint()
    engine = Solver(board)
    model = engine.solve()
    if model is None:
        return
    print(f'\nSolution ({name}):\n')
    model.pprint()
    # <experimental zone>
    core = engine.filter_cores(model)
    if core is not None:
        engine.show_hints(core)
    # </experimental zone>
def main():
    """Run analyze() over each of the bundled boards."""
    cases = (
        (puzzle_1, "evil"),
        (extreme_1, "extreme #1"),
        (extreme_2, "extreme #2"),
        (extreme_3, "extreme #3"),
        (extreme_4, "extreme #4"),
        (hardest, "hardest"),
    )
    for board, label in cases:
        analyze(board, label)
if __name__ == '__main__':
    main()
    # NOTE(review): Census.dump() presumably reports object/term counts and
    # Yices.exit tears the solver library down — confirm against the
    # yices bindings before relying on this.
    print(Census.dump())
    Yices.exit(True)
| 24.565476
| 119
| 0.414587
| 863
| 4,127
| 1.954809
| 0.127462
| 0.403082
| 0.412567
| 0.403082
| 0.367516
| 0.339656
| 0.272081
| 0.19917
| 0.161826
| 0.112033
| 0
| 0.246777
| 0.342137
| 4,127
| 167
| 120
| 24.712575
| 0.374586
| 0.136661
| 0
| 0.095652
| 0
| 0
| 0.028596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017391
| false
| 0
| 0.034783
| 0
| 0.052174
| 0.043478
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91f204cefc1e11f78d143865718a0720e6b49302
| 135
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from .layer_send import AxolotlSendLayer
from .layer_control import AxolotlControlLayer
from .layer_receive import AxolotlReceivelayer
| 33.75
| 46
| 0.888889
| 15
| 135
| 7.8
| 0.6
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 135
| 3
| 47
| 45
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62148220d3b68cf5b490d8e272125fd66f2e326e
| 12,455
|
py
|
Python
|
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | null | null | null |
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
"""A wrapper env that handles multiple tasks from different envs.
Useful while training multi-task reinforcement learning algorithms.
It provides observations augmented with one-hot representation of tasks.
"""
import random
import akro
import gym
import numpy as np
def round_robin_strategy(num_tasks, last_task=None):
    """A function for sampling tasks in round robin fashion.

    Args:
        num_tasks (int): Total number of tasks.
        last_task (int): Previously sampled task.

    Returns:
        int: task id.

    """
    return 0 if last_task is None else (last_task + 1) % num_tasks
def uniform_random_strategy(num_tasks, _):
    """A function for sampling tasks uniformly at random.

    Args:
        num_tasks (int): Total number of tasks.
        _ (object): Ignored by this sampling strategy.

    Returns:
        int: task id.

    """
    # randrange(n) is exactly randint(0, n - 1).
    return random.randrange(num_tasks)
class MultiEnvWrapper(gym.Wrapper):
    """A wrapper class to handle multiple gym environments.

    Observations are zero-padded up to the largest flat observation
    dimension among ``envs`` and prefixed with a one-hot encoding of the
    active task index.

    Args:
        envs (list(gym.Env)): A list of objects implementing gym.Env.
        task_name: Optional per-task names indexable by task id (list or
            dict); used to fill ``info['task_name']`` in step().
        sample_strategy (function(int, int)): Sample strategy to be used
            when sampling a new task.

    """

    def __init__(self, envs, task_name=None,
                 sample_strategy=uniform_random_strategy):
        self._sample_strategy = sample_strategy
        self._num_tasks = len(envs)
        self._active_task_index = None
        self._observation_space = None
        # A falsy task_name collapses to an empty dict; step() guards the
        # lookup so this no longer raises KeyError (see step()).
        self._envs_names_list = task_name or dict()
        # Find the env with the largest flat observation; that size is the
        # common padded dimension and that env becomes the wrapped base env.
        max_flat_dim = np.prod(envs[0].observation_space.shape)
        for i, env in enumerate(envs):
            assert len(env.observation_space.shape) == 1
            if np.prod(env.observation_space.shape) >= max_flat_dim:
                self.max_observation_space_index = i
                max_flat_dim = np.prod(env.observation_space.shape)
        self._max_plain_dim = max_flat_dim
        super().__init__(envs[self.max_observation_space_index])
        self._task_envs = []
        for env in envs:
            if env.action_space.shape != self.env.action_space.shape:
                raise ValueError('Action space of all envs should be same.')
            self._task_envs.append(env)
        self.spec.observation_space = self.observation_space

    @property
    def num_tasks(self):
        """Total number of tasks.

        Returns:
            int: number of tasks.

        """
        return len(self._task_envs)

    @property
    def task_space(self):
        """Task Space.

        Returns:
            akro.Box: Task space.

        """
        one_hot_ub = np.ones(self.num_tasks)
        one_hot_lb = np.zeros(self.num_tasks)
        return akro.Box(one_hot_lb, one_hot_ub)

    @property
    def active_task_index(self):
        """Index of active task env.

        Returns:
            int: Index of active task.

        """
        return self._active_task_index

    @property
    def observation_space(self):
        """Observation space.

        Returns:
            akro.Box: Observation space (one-hot bounds + env bounds).

        """
        task_lb, task_ub = self.task_space.bounds
        env_lb, env_ub = self._observation_space.bounds
        return akro.Box(np.concatenate([task_lb, env_lb]),
                        np.concatenate([task_ub, env_ub]))

    @observation_space.setter
    def observation_space(self, observation_space):
        """Observation space setter.

        Args:
            observation_space (akro.Box): Observation space.

        """
        self._observation_space = observation_space

    @property
    def active_task_one_hot(self):
        """One-hot representation of active task.

        Returns:
            numpy.ndarray: one-hot representation of active task

        """
        one_hot = np.zeros(self.task_space.shape)
        # Before the first reset() the index is None; treat it as task 0.
        index = self.active_task_index or 0
        one_hot[index] = self.task_space.high[index]
        return one_hot

    def reset(self, **kwargs):
        """Sample new task and call reset on new task env.

        Args:
            kwargs (dict): Keyword arguments to be passed to gym.Env.reset

        Returns:
            numpy.ndarray: active task one-hot representation + observation

        """
        self._active_task_index = self._sample_strategy(
            self._num_tasks, self._active_task_index)
        self.env = self._task_envs[self._active_task_index]
        obs = self.env.reset(**kwargs)
        obs = self._augment_observation(obs)
        oh_obs = self._obs_with_one_hot(obs)
        return oh_obs

    def _augment_observation(self, obs):
        """Zero-pad obs up to the common flat dimension, if needed."""
        if np.prod(obs.shape) < self._max_plain_dim:
            zeros = np.zeros(
                shape=(self._max_plain_dim - np.prod(obs.shape),)
            )
            obs = np.concatenate([obs, zeros])
        return obs

    def step(self, action):
        """gym.Env step for the active task env.

        Args:
            action (object): object to be passed in gym.Env.reset(action)

        Returns:
            object: agent's observation of the current environment
            float: amount of reward returned after previous action
            bool: whether the episode has ended
            dict: contains auxiliary diagnostic information

        """
        obs, reward, done, info = self.env.step(action)
        obs = self._augment_observation(obs)
        oh_obs = self._obs_with_one_hot(obs)
        info['task_id'] = self._active_task_index
        # BUG FIX: when no task_name mapping was supplied, the default empty
        # dict raised KeyError on every step; fall back to None instead.
        try:
            info['task_name'] = self._envs_names_list[self._active_task_index]
        except (KeyError, IndexError):
            info['task_name'] = None
        return oh_obs, reward, done, info

    def close(self):
        """Close all task envs."""
        for env in self._task_envs:
            env.close()

    def _obs_with_one_hot(self, obs):
        """Concatenate active task one-hot representation with observation.

        Args:
            obs (numpy.ndarray): observation

        Returns:
            numpy.ndarray: active task one-hot + observation

        """
        oh_obs = np.concatenate([self.active_task_one_hot, obs])
        return oh_obs
# """A wrapper env that handles multiple tasks from different envs.
# Useful while training multi-task reinforcement learning algorithms.
# It provides observations augmented with one-hot representation of tasks.
# """
# import random
# import akro
# import gym
# import numpy as np
# def round_robin_strategy(num_tasks, last_task=None):
# """A function for sampling tasks in round robin fashion.
# Args:
# num_tasks (int): Total number of tasks.
# last_task (int): Previously sampled task.
# Returns:
# int: task id.
# """
# if last_task is None:
# return 0
# return (last_task + 1) % num_tasks
# def uniform_random_strategy(num_tasks, _):
# """A function for sampling tasks uniformly at random.
# Args:
# num_tasks (int): Total number of tasks.
# _ (object): Ignored by this sampling strategy.
# Returns:
# int: task id.
# """
# return random.randint(0, num_tasks - 1)
# class MultiEnvWrapper(gym.Wrapper):
# """A wrapper class to handle multiple gym environments.
# Args:
# envs (list(gym.Env)):
# A list of objects implementing gym.Env.
# sample_strategy (function(int, int)):
# Sample strategy to be used when sampling a new task.
# """
# def __init__(self, envs, sample_strategy=uniform_random_strategy):
# self._sample_strategy = sample_strategy
# self._num_tasks = len(envs)
# self._active_task_index = None
# self._observation_space = None
# max_flat_dim = np.prod(envs[0].observation_space.shape)
# max_observation_space_index = 0
# for i, env in enumerate(envs):
# assert len(env.observation_space.shape) == 1
# if np.prod(env.observation_space.shape) >= max_flat_dim:
# self.max_observation_space_index = i
# max_flat_dim = np.prod(env.observation_space.shape)
# self._max_plain_dim = max_flat_dim
# super().__init__(envs[self.max_observation_space_index])
# self._task_envs = []
# for i, env in enumerate(envs):
# if env.action_space.shape != self.env.action_space.shape:
# raise ValueError('Action space of all envs should be same.')
# self._task_envs.append(env)
# self.env.spec.observation_space = self._task_envs[self.max_observation_space_index].observation_space
# @property
# def num_tasks(self):
# """Total number of tasks.
# Returns:
# int: number of tasks.
# """
# return len(self._task_envs)
# @property
# def task_space(self):
# """Task Space.
# Returns:
# akro.Box: Task space.
# """
# one_hot_ub = np.ones(self.num_tasks)
# one_hot_lb = np.zeros(self.num_tasks)
# return akro.Box(one_hot_lb, one_hot_ub)
# @property
# def active_task_index(self):
# """Index of active task env.
# Returns:
# int: Index of active task.
# """
# return self._active_task_index
# @property
# def observation_space(self):
# """Observation space.
# Returns:
# akro.Box: Observation space.
# """
# task_lb, task_ub = self.task_space.bounds
# env_lb, env_ub = self._observation_space.bounds
# return akro.Box(np.concatenate([task_lb, env_lb]),
# np.concatenate([task_ub, env_ub]))
# @observation_space.setter
# def observation_space(self, observation_space):
# """Observation space setter.
# Args:
# observation_space (akro.Box): Observation space.
# """
# self._observation_space = observation_space
# @property
# def active_task_one_hot(self):
# """One-hot representation of active task.
# Returns:
# numpy.ndarray: one-hot representation of active task
# """
# one_hot = np.zeros(self.task_space.shape)
# index = self.active_task_index or 0
# one_hot[index] = self.task_space.high[index]
# return one_hot
# def reset(self, **kwargs):
# """Sample new task and call reset on new task env.
# Args:
# kwargs (dict): Keyword arguments to be passed to gym.Env.reset
# Returns:
# numpy.ndarray: active task one-hot representation + observation
# """
# self._active_task_index = self._sample_strategy(
# self._num_tasks, self._active_task_index)
# self.env = self._task_envs[self._active_task_index]
# obs = self.env.reset(**kwargs)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# return oh_obs
# def step(self, action):
# """gym.Env step for the active task env.
# Args:
# action (object): object to be passed in gym.Env.reset(action)
# Returns:
# object: agent's observation of the current environment
# float: amount of reward returned after previous action
# bool: whether the episode has ended
# dict: contains auxiliary diagnostic information
# """
# obs, reward, done, info = self.env.step(action)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# info['task_id'] = self._active_task_index
# return oh_obs, reward, done, info
# def _augment_observation(self, obs):
# # optionally zero-pad observation
# if np.prod(obs.shape) < self._max_plain_dim:
# zeros = np.zeros(
# shape=(self._max_plain_dim - np.prod(obs.shape),)
# )
# obs = np.concatenate([obs, zeros])
# return obs
# def close(self):
# """Close all task envs."""
# for env in self._task_envs:
# env.close()
# def _obs_with_one_hot(self, obs):
# """Concatenate active task one-hot representation with observation.
# Args:
# obs (numpy.ndarray): observation
# Returns:
# numpy.ndarray: active task one-hot + observation
# """
# oh_obs = np.concatenate([self.active_task_one_hot, obs])
# return oh_obs
| 29.305882
| 111
| 0.607226
| 1,529
| 12,455
| 4.71223
| 0.103336
| 0.09771
| 0.033033
| 0.039556
| 0.981679
| 0.97821
| 0.971686
| 0.971686
| 0.971686
| 0.971686
| 0
| 0.001709
| 0.295464
| 12,455
| 424
| 112
| 29.375
| 0.819373
| 0.640787
| 0
| 0.129412
| 0
| 0
| 0.014504
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 1
| 0.164706
| false
| 0
| 0.047059
| 0
| 0.364706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
627f3994b2ade29fca362ec30f86cac34a2baa81
| 152
|
py
|
Python
|
resolver/mindeps/__init__.py
|
Shivansh-007/python-resolver
|
c44e93e0715d6d7a736db17122e6a606267329b2
|
[
"MIT"
] | null | null | null |
resolver/mindeps/__init__.py
|
Shivansh-007/python-resolver
|
c44e93e0715d6d7a736db17122e6a606267329b2
|
[
"MIT"
] | null | null | null |
resolver/mindeps/__init__.py
|
Shivansh-007/python-resolver
|
c44e93e0715d6d7a736db17122e6a606267329b2
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Re-export the CLI entry point and helper from the __main__ module.
from resolver.mindeps.__main__ import entrypoint, get_min_deps  # noqa: F401
# Explicit public API of this package.
__all__ = ('entrypoint', 'get_min_deps')
| 21.714286
| 76
| 0.763158
| 20
| 152
| 5.2
| 0.8
| 0.25
| 0.307692
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.125
| 152
| 6
| 77
| 25.333333
| 0.759399
| 0.256579
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6577dab5ac11bbb023397905f08425826f206066
| 176
|
py
|
Python
|
backend/university/admin.py
|
andriyandrushko0/univowl
|
da613316021f7b41b133b5b6e360cc6b9db60504
|
[
"MIT"
] | null | null | null |
backend/university/admin.py
|
andriyandrushko0/univowl
|
da613316021f7b41b133b5b6e360cc6b9db60504
|
[
"MIT"
] | null | null | null |
backend/university/admin.py
|
andriyandrushko0/univowl
|
da613316021f7b41b133b5b6e360cc6b9db60504
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register every model with the default admin site, in the original order.
for _model in (University, Faculty, Subject, Teacher):
    admin.site.register(_model)
| 19.555556
| 32
| 0.8125
| 24
| 176
| 5.958333
| 0.5
| 0.251748
| 0.475524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079545
| 176
| 8
| 33
| 22
| 0.882716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
659182ecb712f24f0371757649f6618c51a53b68
| 193
|
py
|
Python
|
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | null | null | null |
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | 2
|
2021-03-15T15:53:22.000Z
|
2021-05-03T09:32:34.000Z
|
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | 1
|
2021-05-04T15:35:06.000Z
|
2021-05-04T15:35:06.000Z
|
from django.contrib import admin
from prediction.models import product
from prediction.models import price
# Register your models here.
# Register both prediction models with the default admin site.
for _model in (product, price):
    admin.site.register(_model)
| 24.125
| 37
| 0.829016
| 27
| 193
| 5.925926
| 0.481481
| 0.175
| 0.25
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103627
| 193
| 7
| 38
| 27.571429
| 0.924855
| 0.134715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
65cdd034fed36877b4031f60332f1c40cdb5f6a5
| 2,224
|
py
|
Python
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 8
|
2018-07-20T02:32:39.000Z
|
2022-03-27T10:52:55.000Z
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 167
|
2017-12-19T14:33:35.000Z
|
2022-03-22T11:47:20.000Z
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 52
|
2017-12-18T13:55:24.000Z
|
2022-02-09T14:07:14.000Z
|
#!/usr/bin/python3
import os
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
    """Mock HTTP handler replaying canned JSON responses for known paths.

    GET on the recognized deploymentconfig paths answers 200 with a JSON
    file stored next to this script; GET /halt stops the server.
    PATCH/PUT/DELETE answer 200 unconditionally for any path.
    """

    def _reply_json_file(self, relpath):
        """Send a 200 JSON response whose body is the file at relpath
        (relative to the script directory)."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        test = os.path.join(sys.path[0], relpath)
        # BUG FIX: the original `open(...).read()` never closed the file
        # handle; `with` guarantees it is released.
        with open(test, "r") as fh:
            response = fh.read()
        self.wfile.write(response.encode(encoding='utf_8'))

    def do_GET(self):
        # do not change paths
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-up':
            self._reply_json_file("responses/kieserver-dc.json")
        # do not change paths
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-down':
            self._reply_json_file("responses/kieserver-dc-0-replicas.json")
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs/rhpam-central-console':
            self._reply_json_file("responses/bc-dc.json")
        if self.path == '/halt':
            print("Halting server")
            self.send_response(200)
            self.end_headers()
            sys.exit()

    # for patch method, only return 200 for any path
    def do_PATCH(self):
        self.send_response(200)

    # for put method, only return 200 for any path
    def do_PUT(self):
        self.send_response(200)

    # for delete method, only return 200 for any path
    def do_DELETE(self):
        self.send_response(200)
# Bind the mock server to localhost:8080 and block this thread serving
# requests until the process is killed or a GET /halt triggers sys.exit().
httpd = HTTPServer(("localhost", 8080), MyHandler)
httpd.serve_forever()
| 37.694915
| 179
| 0.642536
| 279
| 2,224
| 5.043011
| 0.315412
| 0.056859
| 0.079602
| 0.094527
| 0.800284
| 0.767591
| 0.767591
| 0.767591
| 0.767591
| 0.743426
| 0
| 0.028522
| 0.227518
| 2,224
| 58
| 180
| 38.344828
| 0.790454
| 0.08723
| 0
| 0.512821
| 0
| 0.076923
| 0.300544
| 0.226396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.076923
| 0
| 0.205128
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02a84eeec97777f61185766e05077d7532adafbc
| 232
|
py
|
Python
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 124
|
2015-07-30T21:34:25.000Z
|
2022-02-19T08:45:50.000Z
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 37
|
2015-08-31T23:02:20.000Z
|
2022-02-04T04:45:28.000Z
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 20
|
2015-08-04T18:59:09.000Z
|
2021-12-13T08:08:59.000Z
|
from pyramda.function.curry import curry
from pyramda.function.always import always
from pyramda.iterable.reduce import reduce
from .either import either
@curry
def any_pass(ps, v):
    """Fold the predicates in ps with `either` (seeded with always(False))
    and apply the combined predicate to v."""
    combined = reduce(either, always(False), ps)
    return combined(v)
| 23.2
| 47
| 0.784483
| 35
| 232
| 5.171429
| 0.457143
| 0.18232
| 0.209945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 232
| 9
| 48
| 25.777778
| 0.89604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0.571429
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 6
|
f323bb4c6d1d42af8adea82f66966d109724eba9
| 29,495
|
py
|
Python
|
api/fileupload.py
|
subhendu01/Audio-FIle-Server
|
6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6
|
[
"MIT"
] | 5
|
2021-05-12T18:18:49.000Z
|
2022-01-06T12:35:35.000Z
|
api/fileupload.py
|
subhendu01/Audio-FIle-Server
|
6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6
|
[
"MIT"
] | null | null | null |
api/fileupload.py
|
subhendu01/Audio-FIle-Server
|
6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6
|
[
"MIT"
] | null | null | null |
import datetime, os, base64
from flask import Flask, jsonify, request, Blueprint
from dbstore import dbconf
import json
from bson import json_util
# process kill
# lsof -i tcp:3000
# Blueprint carrying all the upload API routes defined below.
file_upload = Blueprint('uploadAPI', __name__)
# NOTE(review): this module-level Flask app looks unused here — every route
# attaches to the blueprint; confirm before removing.
app = Flask(__name__)
def song_upload(val):
    """Validate song metadata and build the response dict for /api/_create.

    Args:
        val (dict): 'audioFileMetadata' payload with keys 'audio_file_id',
            'song_name' and 'duration_sec'.

    Returns:
        dict: status 200 with the validated fields (plus a freshly assigned
        sequential 'id'), or status 400/500 with an error message.
    """
    try:
        # Next sequential id: one past the most recently inserted record.
        # (Renamed from `id`, which shadowed the builtin.)
        next_id = 1
        curs = dbconf.file_store.find().sort([("_id", -1)]).limit(1)
        if curs.count() > 0:
            for rec in curs:
                next_id = rec["audioFileMetadata"]["id"] + 1

        audio_file_id = int(val["audio_file_id"])
        cursor_file_id = dbconf.file_store.find(
            {'audioFileMetadata.audio_file_id': audio_file_id})
        # Guard clauses replace the original 4-deep if/else pyramid;
        # check order (and every message string) is unchanged.
        if cursor_file_id.count() != 0:
            return {
                "status": 400,
                "msg": "Duplicate audio id found."
            }

        song_name = str(val['song_name'])
        duration_sec = int(val['duration_sec'])
        upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        if not (len(song_name) != 0 and len(song_name) <= 100):
            return {
                "status": 400,
                "msg": "Song name should be between 0 to 100 characters",
                "upload_time": upload_time
            }
        if duration_sec < 0:
            return {
                "status": 400,
                "msg": "Duration should be positive integer number",
                "upload_time": upload_time
            }
        return {
            "status": 200,
            "msg": "Successful",
            "id": next_id,
            "song_name": song_name,
            "duration_sec": duration_sec,
            "upload_time": upload_time,
            "audio_file_id": audio_file_id
        }
    except Exception as e:
        print(str(e))
        return {
            "status": 500,
            "msg": "Something went wrong."
        }
def podcast_upload(val):
    """Validate podcast metadata and build the response dict for /api/_create.

    Args:
        val (dict): 'audioFileMetadata' payload with keys 'audio_file_id',
            'podcast_name', 'duration_sec', 'host' and 'participant'.

    Returns:
        dict: status 200 with the validated fields (plus a freshly assigned
        sequential 'id'), or status 400/500 with an error message.
    """
    try:
        # Next sequential id: one past the most recently inserted record.
        # (Renamed from `id`, which shadowed the builtin.)
        next_id = 1
        curs = dbconf.file_store.find().sort([("_id", -1)]).limit(1)
        if curs.count() > 0:
            for rec in curs:
                next_id = rec["audioFileMetadata"]["id"] + 1

        audio_file_id = int(val["audio_file_id"])
        cursor_file_id = dbconf.file_store.find(
            {'audioFileMetadata.audio_file_id': audio_file_id})
        # Guard clauses replace the original 6-deep if/else pyramid;
        # check order (and every message string) is unchanged.
        if cursor_file_id.count() != 0:
            return {
                "status": 400,
                "msg": "Duplicate audio id found."
            }

        podcast_name = str(val['podcast_name'])
        duration_sec = int(val['duration_sec'])
        upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        host = str(val['host'])
        participant = val['participant']

        if len(podcast_name) > 100:
            return {
                "status": 400,
                "msg": "Name of the podcast cannot be larger than 100 characters.",
                "upload_time": upload_time
            }
        if duration_sec < 0:
            return {
                "status": 400,
                "msg": "Duration should be positive integer number",
                "upload_time": upload_time
            }
        exceed_leng = [x for x in participant if len(x) >= 100]
        if not (len(participant) <= 10 and len(exceed_leng) == 0):
            return {
                "status": 400,
                "msg": "Each string cannot be larger than 100 characters, maximum of 10 participants possible",
                "upload_time": upload_time
            }
        if len(host) > 100:
            return {
                "status": 400,
                "msg": "Host cannot be larger than 100 characters.",
                "upload_time": upload_time
            }
        return {
            "status": 200,
            "msg": "sucessful",
            "id": next_id,
            "podcast_name": podcast_name,
            "duration_sec": duration_sec,
            "upload_time": upload_time,
            "host": host,
            "participant": participant,
            "audio_file_id": audio_file_id
        }
    except Exception as e:
        print(str(e))
        return {
            "status": 500,
            "msg": "Something went wrong."
        }
def audiobook_upload(val):
    """Validate audiobook metadata and build the response dict for /api/_create.

    Args:
        val (dict): 'audioFileMetadata' payload with keys 'audio_file_id',
            'audiobook_title', 'author_title', 'narrator' and 'duration_sec'.

    Returns:
        dict: status 200 with the validated fields (plus a freshly assigned
        sequential 'id'), or status 400/500 with an error message.
    """
    try:
        # Next sequential id: one past the most recently inserted record.
        # (Renamed from `id`, which shadowed the builtin.)
        next_id = 1
        curs = dbconf.file_store.find().sort([("_id", -1)]).limit(1)
        if curs.count() > 0:
            for rec in curs:
                next_id = rec["audioFileMetadata"]["id"] + 1

        audio_file_id = int(val["audio_file_id"])
        cursor_file_id = dbconf.file_store.find(
            {'audioFileMetadata.audio_file_id': audio_file_id})
        # Guard clauses replace the original 5-deep if/else pyramid;
        # check order (and every message string) is unchanged.
        if cursor_file_id.count() != 0:
            return {
                "status": 400,
                "msg": "Duplicate audio id found."
            }

        audiobook_title = str(val['audiobook_title'])
        author_title = str(val['author_title'])
        narrator = str(val['narrator'])
        duration_sec = int(val['duration_sec'])
        upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        if not (len(audiobook_title) <= 100 and len(audiobook_title) != 0):
            return {
                "status": 400,
                "msg": "Audiobook should be between 0 to 100 characters.",
                "upload_time": upload_time
            }
        if not (len(author_title) <= 100 and len(author_title) != 0):
            return {
                "status": 400,
                "msg": "Author title should be between 0 to 100 characters.",
                "upload_time": upload_time
            }
        if not (len(narrator) <= 100 and len(narrator) != 0):
            return {
                "status": 400,
                "msg": "Narrator should be between 0 to 100 characters.",
                "upload_time": upload_time
            }
        if duration_sec < 0:
            return {
                "status": 400,
                "msg": "Duration should be positive integer number",
                "upload_time": upload_time
            }
        return {
            "status": 200,
            "msg": "sucessful",
            "id": next_id,
            "audiobook_title": audiobook_title,
            "author_title": author_title,
            "narrator": narrator,
            "duration_sec": duration_sec,
            "upload_time": upload_time,
            "audio_file_id": audio_file_id
        }
    except Exception as e:
        print(str(e))
        return {
            "status": 500,
            "msg": "Something went wrong."
        }
def _persist_upload(file_type_lower, func_call, fields):
    """Build the metadata record from a successful validation result,
    insert it into the store, and return the API response.

    Args:
        file_type_lower (str): lower-cased audio file type ('song', ...).
        func_call (dict): status-200 result of one of the *_upload helpers.
        fields (tuple(str)): metadata keys to copy, in response order.

    Returns:
        dict: status/msg plus the inserted record (JSON-safe).
    """
    audioFileMetadata = {f: func_call[f] for f in fields}
    rec = {
        "audioFileType": file_type_lower,
        "audioFileMetadata": audioFileMetadata
    }
    # Round-trip through json_util so ObjectId etc. become JSON-safe.
    data = json.loads(json_util.dumps(rec))
    dbconf.file_store.insert(rec)
    return {
        "status": func_call["status"],
        "msg": func_call["msg"],
        "record": data
    }


# Maps each supported type to (validator, metadata keys in response order).
_UPLOAD_DISPATCH = {
    'song': (song_upload,
             ("duration_sec", "id", "song_name", "upload_time",
              "audio_file_id")),
    'podcast': (podcast_upload,
                ("podcast_name", "id", "duration_sec", "host", "upload_time",
                 "participant", "audio_file_id")),
    'audiobook': (audiobook_upload,
                  ("audiobook_title", "id", "duration_sec", "author_title",
                   "upload_time", "narrator", "audio_file_id")),
}


@file_upload.route('api/_create', methods= ['POST'])
def create():
    """Create an audio record (song / podcast / audiobook) from JSON input.

    The original body repeated three near-identical 35-line branches; the
    per-type differences are now data in _UPLOAD_DISPATCH and the shared
    persistence logic lives in _persist_upload.
    """
    try:
        if request.method != "POST":
            return jsonify({
                "status": 400,
                "msg": "Bad request."
            })
        content = request.get_json()
        audioFileType = content['audioFileType']
        handler = _UPLOAD_DISPATCH.get(audioFileType.lower())
        if handler is None:
            return jsonify({
                "status": 400,
                "msg": "Bad request."
            })
        upload_func, fields = handler
        func_call = upload_func(content['audioFileMetadata'])
        if func_call["status"] == 200:
            response = _persist_upload(audioFileType.lower(), func_call,
                                       fields)
        else:
            # 400 and 500 results were handled identically in the original.
            response = {
                "status": func_call["status"],
                "msg": func_call["msg"]
            }
        return jsonify(response)
    except Exception as e:
        print(str(e))
        return jsonify({
            "status": 500,
            "msg": "Something went wrong."
        })
@file_upload.route('api/_delete/<string:audioFileType>/<int:audioFileID>', methods= ['DELETE'])
def delete_(audioFileType, audioFileID):
    """Delete the record matching the given type and audio file id.

    Returns a 200 payload on success, 400 when the id is unknown or the
    method is wrong, and 500 on unexpected errors.
    """
    try:
        if request.method != "DELETE":
            return jsonify({
                "status": 400,
                "msg": "Bad request."
            })
        query = {
            "audioFileType": audioFileType.lower(),
            'audioFileMetadata.audio_file_id': audioFileID
        }
        matches = dbconf.file_store.find(query)
        if matches.count() != 0:
            dbconf.file_store.remove(query)
            response = {
                "status": 200,
                "msg": "Sucessfull.",
                "audioFileType": audioFileType,
                "audioFileID": audioFileID
            }
        else:
            response = {
                "status": 400,
                "msg": "audio file ID is not found.",
                "audioFileType": audioFileType,
                "audioFileID": audioFileID
            }
        return jsonify(response)
    except Exception as e:
        print(str(e))
        return jsonify({
            "status": 500,
            "msg": "Something went wrong."
        })
def _song_update_fields(meta):
    """Validate song metadata; return ($set-field dict, None) or (None, error)."""
    song_name = meta["song_name"]
    duration_sec = meta["duration_sec"]
    if not 0 < len(song_name) <= 100:
        return None, "Song name should be between 0 to 100 characters"
    if duration_sec < 0:
        return None, "Duration should be positive integer number"
    return {
        "audioFileMetadata.duration_sec": duration_sec,
        "audioFileMetadata.song_name": song_name,
    }, None


def _podcast_update_fields(meta):
    """Validate podcast metadata; return ($set-field dict, None) or (None, error)."""
    podcast_name = meta["podcast_name"]
    duration_sec = meta["duration_sec"]
    host = meta["host"]
    participant = meta["participant"]
    if not 0 < len(podcast_name) <= 100:
        return None, "Name of the podcast should be between 0 to 100 characters"
    if duration_sec < 0:
        return None, "Duration should be positive integer number"
    # Fixed off-by-one: the old code rejected participant names of exactly
    # 100 characters (len >= 100) although the message promises "larger than 100".
    if len(participant) > 10 or any(len(p) > 100 for p in participant):
        return None, ("Each string cannot be larger than 100 characters,"
                      " maximum of 10 participants possible")
    if not 0 < len(host) <= 100:
        return None, "Host should be between 0 to 100 characters"
    return {
        "audioFileMetadata.podcast_name": podcast_name,
        "audioFileMetadata.duration_sec": duration_sec,
        "audioFileMetadata.host": host,
        "audioFileMetadata.participant": participant,
    }, None


def _audiobook_update_fields(meta):
    """Validate audiobook metadata; return ($set-field dict, None) or (None, error)."""
    audiobook_title = meta["audiobook_title"]
    duration_sec = meta["duration_sec"]
    author_title = meta["author_title"]
    narrator = meta["narrator"]
    if not 0 < len(audiobook_title) <= 100:
        return None, "Audiobook should be between 0 to 100 characters."
    if not 0 < len(author_title) <= 100:
        return None, "Author title should be between 0 to 100 characters."
    if not 0 < len(narrator) <= 100:
        return None, "Narrator should be between 0 to 100 characters."
    if duration_sec < 0:
        return None, "Duration should be positive integer number"
    return {
        "audioFileMetadata.audiobook_title": audiobook_title,
        "audioFileMetadata.duration_sec": duration_sec,
        "audioFileMetadata.author_title": author_title,
        "audioFileMetadata.narrator": narrator,
    }, None


# Per-type validators keyed by the lower-cased audioFileType route value.
_UPDATE_VALIDATORS = {
    "song": _song_update_fields,
    "podcast": _podcast_update_fields,
    "audiobook": _audiobook_update_fields,
}


# NOTE: the URL rule now starts with '/'; Flask/Werkzeug reject rules without
# a leading slash ("urls must start with a leading slash").
@file_upload.route('/api/_update/<string:audioFileType>/<int:audioFileID>', methods=['PUT'])
def update(audioFileType, audioFileID):
    """Update the stored metadata of one audio file.

    Path params:
        audioFileType: 'song', 'podcast' or 'audiobook' (case-insensitive).
        audioFileID: value matched against audioFileMetadata.audio_file_id.

    Body: JSON document with an "audioFileMetadata" object holding the
    type-specific fields validated by the helpers above.  The stored
    upload_time is refreshed on every successful update.

    Always responds HTTP 200; the JSON "status"/"msg" fields carry the real
    outcome (kept from the original API contract).
    """
    try:
        # Defensive: the route already restricts the method to PUT.
        if request.method != "PUT":
            return jsonify({"status": 400, "msg": "Bad request."})
        content = request.json
        query = {
            "audioFileType": audioFileType.lower(),
            "audioFileMetadata.audio_file_id": audioFileID,
        }
        echo = {"audioFileType": audioFileType, "audioFileID": audioFileID}
        # count_documents() replaces Cursor.count(), which was deprecated in
        # PyMongo 3.x and removed in PyMongo 4.
        if dbconf.file_store.count_documents(query) == 0:
            return jsonify({"status": 400,
                            "msg": "audio file ID is not found.", **echo})
        validator = _UPDATE_VALIDATORS.get(audioFileType.lower())
        if validator is None:
            # Unreachable in practice: an unknown type cannot match the query
            # above.  The original code raised UnboundLocalError (-> 500) here.
            return jsonify({"status": 400, "msg": "Bad request."})
        fields, error = validator(content["audioFileMetadata"])
        if error is not None:
            return jsonify({"status": 400, "msg": error, **echo})
        fields["audioFileMetadata.upload_time"] = \
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        dbconf.file_store.update_one(query, {"$set": fields})
        # "Sucessfull." (sic) kept byte-for-byte for client compatibility.
        return jsonify({"status": 200, "msg": "Sucessfull.", **echo})
    except Exception as e:
        # Mirrors the original catch-all: log and report a generic 500 envelope.
        print(str(e))
        return jsonify({"status": 500, "msg": "Something went wrong."})
# Maps a stored audioFileType to the metadata key that holds its display name.
_NAME_KEYS = {
    "song": "song_name",
    "podcast": "podcast_name",
    "audiobook": "audiobook_title",
}


# NOTE: both URL rules now start with '/'; Flask/Werkzeug reject rules without
# a leading slash.  The no-ID rule also gains the explicit string: converter
# for consistency with its sibling.
@file_upload.route("/api/_getapi/<string:audioFileType>", methods=["GET"], defaults={"audioFileID": None})
@file_upload.route('/api/_getapi/<string:audioFileType>/<int:audioFileID>', methods=['GET'])
def getapi(audioFileType, audioFileID):
    """Fetch the display name(s) of stored audio files.

    With an ID: returns the single matching file's name as "audio_file".
    Without an ID: returns every name of that type as "audio_list".

    Always responds HTTP 200; the JSON "status"/"msg" fields carry the real
    outcome (kept from the original API contract).
    """
    try:
        # Defensive: the routes already restrict the method to GET.
        if request.method != 'GET':
            return jsonify({"status": 400, "msg": "Bad request."})
        file_type = audioFileType.lower()
        if audioFileID is not None:
            query = {"audioFileType": file_type,
                     "audioFileMetadata.audio_file_id": audioFileID}
            # Materialising the cursor replaces the deprecated Cursor.count()
            # (removed in PyMongo 4).
            docs = list(dbconf.file_store.find(query))
            if not docs:
                return jsonify({
                    "status": 400,
                    "msg": "audio file ID is not found.",
                    "audioFileType": audioFileType,
                    "audioFileID": audioFileID,
                })
            # The original looped over all matches and kept the last one;
            # IDs are presumably unique, but last-match behaviour is preserved.
            rec = docs[-1]
            # Unknown stored types raise KeyError -> generic 500, matching the
            # original's NameError path.
            name_key = _NAME_KEYS[rec["audioFileType"]]
            return jsonify({
                "status": 200,
                "msg": "Sucessfull.",  # (sic) kept for client compatibility
                "audioFileType": audioFileType,
                "audio_file": rec["audioFileMetadata"][name_key],
            })
        docs = list(dbconf.file_store.find({"audioFileType": file_type}))
        if not docs:
            return jsonify({
                "status": 400,
                "msg": "Audio files not found.",
                "audioFileType": audioFileType,
            })
        # Like the original, silently skip records whose type has no known
        # name field instead of failing the whole listing.
        audio_list = []
        for doc in docs:
            name_key = _NAME_KEYS.get(doc["audioFileType"])
            if name_key is not None:
                audio_list.append(doc["audioFileMetadata"][name_key])
        return jsonify({
            "status": 200,
            "msg": "Sucessfull.",
            "audioFileType": audioFileType,
            "audio_list": audio_list,
        })
    except Exception as e:
        # Mirrors the original catch-all: log and report a generic 500 envelope.
        print(str(e))
        return jsonify({"status": 500, "msg": "Something went wrong."})
| 45.376923
| 137
| 0.398644
| 2,113
| 29,495
| 5.404165
| 0.072409
| 0.069884
| 0.032752
| 0.056047
| 0.807164
| 0.776513
| 0.707417
| 0.693318
| 0.684648
| 0.622646
| 0
| 0.021991
| 0.508188
| 29,495
| 649
| 138
| 45.446841
| 0.765201
| 0.016443
| 0
| 0.716194
| 0
| 0
| 0.187317
| 0.029914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011686
| false
| 0
| 0.008347
| 0
| 0.043406
| 0.015025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8ada7f96a8a91b1795a09283b5bb56adf3d888d
| 2,373
|
py
|
Python
|
tests/_geom/test_path_control_x_interface.py
|
ynsnf/apysc
|
b10ffaf76ec6beb187477d0a744fca00e3efc3fb
|
[
"MIT"
] | 16
|
2021-04-16T02:01:29.000Z
|
2022-01-01T08:53:49.000Z
|
tests/_geom/test_path_control_x_interface.py
|
ynsnf/apysc
|
b10ffaf76ec6beb187477d0a744fca00e3efc3fb
|
[
"MIT"
] | 613
|
2021-03-24T03:37:38.000Z
|
2022-03-26T10:58:37.000Z
|
tests/_geom/test_path_control_x_interface.py
|
simon-ritchie/apyscript
|
c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279
|
[
"MIT"
] | 2
|
2021-06-20T07:32:58.000Z
|
2021-12-26T08:22:11.000Z
|
from random import randint
from retrying import retry
import apysc as ap
from apysc._geom.path_control_x_interface import PathControlXInterface
class TestPathControlXInterface:
    """Tests for PathControlXInterface's control_x attribute handling.

    Every test is retried up to 15 times with a random wait; presumably to
    work around flaky shared state in the apysc test suite -- TODO confirm.
    """

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_control_x(self) -> None:
        # Setter/getter round trip: the assigned ap.Int must be read back.
        interface: PathControlXInterface = PathControlXInterface()
        interface._control_x = ap.Int(0)
        interface.control_x = ap.Int(10)
        assert interface.control_x == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__make_snapshot(self) -> None:
        # Taking a snapshot must record the current _control_x value under
        # the generated snapshot name.
        interface: PathControlXInterface = PathControlXInterface()
        interface._control_x = ap.Int(10)
        snapshot_name: str = interface._get_next_snapshot_name()
        interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        assert interface._control_x_snapshots[snapshot_name] == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__revert(self) -> None:
        # Reverting without a prior snapshot must leave the value untouched;
        # after a snapshot, a later assignment must roll back to the
        # snapshotted value.
        interface: PathControlXInterface = PathControlXInterface()
        interface._control_x = ap.Int(10)
        snapshot_name: str = interface._get_next_snapshot_name()
        interface._run_all_revert_methods(snapshot_name=snapshot_name)
        assert interface.control_x == 10
        interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        interface._control_x = ap.Int(20)
        interface._run_all_revert_methods(snapshot_name=snapshot_name)
        assert interface.control_x == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__initialize_control_x_if_not_initialized(self) -> None:
        # First call initialises control_x to 0; later calls must not
        # clobber an already-set value.
        interface: PathControlXInterface = PathControlXInterface()
        interface._initialize_control_x_if_not_initialized()
        assert interface.control_x == 0
        interface.control_x = ap.Int(10)
        interface._initialize_control_x_if_not_initialized()
        assert interface.control_x == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_control_x_linking_setting(self) -> None:
        # Initialisation must push the initial value onto the attribute
        # linking stack registered under the 'control_x' key.
        interface: PathControlXInterface = PathControlXInterface()
        interface._initialize_control_x_if_not_initialized()
        assert interface._attr_linking_stack['control_x'] == [ap.Int(0)]
| 43.944444
| 78
| 0.728614
| 283
| 2,373
| 5.681979
| 0.190813
| 0.099502
| 0.126866
| 0.056592
| 0.847637
| 0.825249
| 0.804104
| 0.77301
| 0.77301
| 0.758706
| 0
| 0.03283
| 0.191319
| 2,373
| 53
| 79
| 44.773585
| 0.805107
| 0
| 0
| 0.642857
| 0
| 0
| 0.003879
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.119048
| false
| 0
| 0.095238
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2595f5495569bfb18a30651ccf4bc3e61dec9b6
| 35
|
py
|
Python
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 1
|
2021-02-09T02:13:23.000Z
|
2021-02-09T02:13:23.000Z
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 31
|
2021-02-02T17:03:39.000Z
|
2021-04-13T03:22:16.000Z
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 1
|
2021-03-14T05:56:16.000Z
|
2021-03-14T05:56:16.000Z
|
import scripts.project_functions
| 8.75
| 32
| 0.857143
| 4
| 35
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 3
| 33
| 11.666667
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a265970c825b69a6bcc7be605b442dbeced8128f
| 9,491
|
py
|
Python
|
app/jobHistory/migrations/0003_auto_20190804_1403.py
|
stephengtuggy/job-history
|
5c4931ff7b594494a687da0253262c7fc46f8b13
|
[
"MIT"
] | 2
|
2020-01-18T00:39:35.000Z
|
2020-01-18T02:03:26.000Z
|
app/jobHistory/migrations/0003_auto_20190804_1403.py
|
stephengtuggy/job-history
|
5c4931ff7b594494a687da0253262c7fc46f8b13
|
[
"MIT"
] | 18
|
2020-08-07T23:22:37.000Z
|
2021-06-10T18:38:42.000Z
|
app/jobHistory/migrations/0003_auto_20190804_1403.py
|
stephengtuggy/job-history
|
5c4931ff7b594494a687da0253262c7fc46f8b13
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-04 21:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.4, 2019-08-04).

    Adjusts field options (verbose_name, blank/null, max_length, unique) on
    the employer, jobtimeperiod and position models; no fields are added or
    removed.  Do not hand-edit the operations below -- regenerate with
    `makemigrations` instead.
    """

    dependencies = [
        ('jobHistory', '0002_auto_20190106_0202'),
    ]

    operations = [
        # --- employer: address/contact fields ---
        migrations.AlterField(
            model_name='employer',
            name='city',
            field=models.CharField(blank=True, max_length=200, verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Country'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='County or Parish'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='industry',
            field=models.CharField(blank=True, max_length=254, verbose_name='Industry'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='long_name',
            field=models.CharField(max_length=254, null=True, unique=True, verbose_name='Long Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Phone'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='short_name',
            field=models.CharField(max_length=50, unique=True, verbose_name='Short Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='State or Province'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Zip Code or Postal Code'),
        ),
        # --- jobtimeperiod: dates, pay and work-location fields ---
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_year',
            field=models.PositiveIntegerField(null=True, verbose_name='End Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='ending_pay',
            field=models.CharField(max_length=50, verbose_name='Ending Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='hours_per_week',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Hours per Week'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='is_current_position',
            field=models.BooleanField(default=True, verbose_name='Current Position?'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Position', verbose_name='Position'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_year',
            field=models.PositiveIntegerField(verbose_name='Start Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='starting_pay',
            field=models.CharField(max_length=50, verbose_name='Starting Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work City'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work Country'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work County or Parish'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work State or Province'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Work Zip Code or Postal Code'),
        ),
        # --- position: role details and supervisor contact fields ---
        migrations.AlterField(
            model_name='position',
            name='can_contact',
            field=models.BooleanField(verbose_name='Can Contact?'),
        ),
        migrations.AlterField(
            model_name='position',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='position',
            name='employer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Employer', verbose_name='Employer'),
        ),
        migrations.AlterField(
            model_name='position',
            name='responsibilities',
            field=models.TextField(blank=True, verbose_name='Responsibilities'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor City'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Country'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor County or Parish'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Supervisor Email'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_given_name',
            field=models.CharField(max_length=200, verbose_name='Supervisor Given Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_middle_name',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Middle Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_prefix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Prefix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_suffix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Suffix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Phone'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor State or Province'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_surname',
            field=models.CharField(max_length=200, verbose_name='Supervisor Surname'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Zip Code or Postal Code'),
        ),
        migrations.AlterField(
            model_name='position',
            name='title',
            field=models.CharField(max_length=200, verbose_name='Title'),
        ),
    ]
| 40.387234
| 132
| 0.603624
| 912
| 9,491
| 6.080044
| 0.105263
| 0.158702
| 0.198377
| 0.230117
| 0.871776
| 0.858251
| 0.769702
| 0.482597
| 0.398918
| 0.354914
| 0
| 0.016338
| 0.284164
| 9,491
| 234
| 133
| 40.559829
| 0.799823
| 0.004741
| 0
| 0.596491
| 1
| 0
| 0.19568
| 0.034519
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008772
| 0
| 0.02193
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2c1f4bdfbf091de32f73f29f4fe1cc1d9bf86e8
| 2,081
|
py
|
Python
|
01 Dimensionality Reduction/Tutorial 03 - Unsupervised nonlinear embedding/isomap/dijkstra.py
|
KateYeon/Business-Anlaytics
|
454c1cb1b88499e94eeb5e8a7a32309afb7165e5
|
[
"MIT"
] | null | null | null |
01 Dimensionality Reduction/Tutorial 03 - Unsupervised nonlinear embedding/isomap/dijkstra.py
|
KateYeon/Business-Anlaytics
|
454c1cb1b88499e94eeb5e8a7a32309afb7165e5
|
[
"MIT"
] | null | null | null |
01 Dimensionality Reduction/Tutorial 03 - Unsupervised nonlinear embedding/isomap/dijkstra.py
|
KateYeon/Business-Anlaytics
|
454c1cb1b88499e94eeb5e8a7a32309afb7165e5
|
[
"MIT"
] | null | null | null |
class Graph(object):
    """A simple undirected, weighted graph.

    Attributes:
        nodes: set of node values.
        edges: adjacency mapping, node -> list of neighbouring nodes.
        distances: (from_node, to_node) -> edge weight, stored once per
            direction because the graph is undirected.
    """

    def __init__(self):
        self.nodes = set()
        self.edges = {}
        self.distances = {}

    def add_node(self, value):
        """Register *value* as a node of the graph."""
        self.nodes.add(value)

    def add_edge(self, from_node, to_node, distance):
        """Add an undirected edge by recording it once per direction."""
        for src, dst in ((from_node, to_node), (to_node, from_node)):
            self._record(src, dst, distance)

    def _record(self, src, dst, distance):
        # Directed bookkeeping shared by both halves of add_edge.
        self.edges.setdefault(src, []).append(dst)
        self.distances[(src, dst)] = distance
def dijkstra(graph, initial_node):
    """Single-source shortest-path distances from *initial_node*.

    *graph* must expose ``nodes`` (iterable), ``edges`` (node -> neighbour
    list) and ``distances`` ((node, node) -> weight).  Returns a dict mapping
    every reachable node to its shortest distance from the start node.
    """
    best = {initial_node: 0}
    pending = set(graph.nodes)
    while pending:
        # Pick the unfinished node with the smallest tentative distance.
        frontier = [node for node in pending if node in best]
        if not frontier:
            break  # every remaining node is unreachable
        current = min(frontier, key=best.__getitem__)
        pending.remove(current)
        base_cost = best[current]
        # Relax every outgoing edge of the chosen node.
        for neighbour in graph.edges[current]:
            cost = base_cost + graph.distances[(current, neighbour)]
            if neighbour not in best or cost < best[neighbour]:
                best[neighbour] = cost
    return best
def dijkstra2(graph, initial_node):
    """Single-source shortest-path distances from *initial_node*.

    The original body was a line-for-line copy of ``dijkstra``; the
    duplicate implementation is replaced by a direct delegation so the
    algorithm lives in exactly one place.  Same arguments, same return
    value (dict of reachable node -> shortest distance).
    """
    return dijkstra(graph, initial_node)
| 26.341772
| 60
| 0.537242
| 254
| 2,081
| 4.192913
| 0.169291
| 0.131455
| 0.037559
| 0.052582
| 0.777465
| 0.756808
| 0.756808
| 0.756808
| 0.719249
| 0.719249
| 0
| 0.002317
| 0.377703
| 2,081
| 78
| 61
| 26.679487
| 0.820077
| 0.016819
| 0
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2d7927bd74ff2bc70037658a7110cb4dffa918c
| 43
|
py
|
Python
|
rcds/project/__init__.py
|
jordanbertasso/rcds
|
d3d655a59a350042d65476793db84e761de04829
|
[
"BSD-3-Clause"
] | 5
|
2020-07-13T12:40:02.000Z
|
2021-08-21T11:18:28.000Z
|
rcds/project/__init__.py
|
jordanbertasso/rcds
|
d3d655a59a350042d65476793db84e761de04829
|
[
"BSD-3-Clause"
] | 144
|
2020-07-06T11:26:49.000Z
|
2022-02-01T14:33:28.000Z
|
rcds/project/__init__.py
|
jordanbertasso/rcds
|
d3d655a59a350042d65476793db84e761de04829
|
[
"BSD-3-Clause"
] | 7
|
2020-07-22T12:38:32.000Z
|
2021-12-21T14:27:54.000Z
|
from .project import Project # noqa: F401
| 21.5
| 42
| 0.744186
| 6
| 43
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.186047
| 43
| 1
| 43
| 43
| 0.828571
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2ff595beb35cc3bf63e8eee3f852f028caee135
| 55,499
|
py
|
Python
|
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
import sys
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from capsulelayers import CapsuleLayer
from capsulelayers import MatMulLayer
from loupe_keras import NetVLAD
# Raise the recursion limit well above the default (~1000); presumably needed
# while constructing the deeply nested model graph -- TODO confirm.
sys.setrecursionlimit(2**20)
# Seed NumPy's RNG for reproducible runs.
np.random.seed(2**10)

# Custom layers
# Note - usage of Lambda layers prevents the conversion
# and the optimizations by the underlying math engine (TensorFlow in this
# case), hence the explicit Layer subclasses below.
class SSRLayer(tf.keras.layers.Layer):
    """Soft Stagewise Regression (SSR) output layer.

    Combines three stages of per-bin probabilities with learned shift
    (x[6..8]) and dynamic-range (x[3..5]) adjustments into a continuous
    prediction.  This appears to implement the SSR module used by
    SSR-Net / FSA-Net style models -- TODO confirm against the paper.
    """

    def __init__(self, s1, s2, s3, lambda_d, **kwargs):
        super(SSRLayer, self).__init__(**kwargs)
        # Number of bins in each of the three stages.
        self.s1 = s1
        self.s2 = s2
        self.s3 = s3
        # Scale applied to the learned dynamic-range adjustments x[3..5].
        self.lambda_d = lambda_d
        # Pure arithmetic on its inputs; nothing to learn here.
        self.trainable = False

    def call(self, inputs):
        # inputs is a list: x[0..2] stage probabilities, x[3..5] dynamic-range
        # factors, x[6..8] per-stage shift terms.
        x = inputs
        # Zero accumulators shaped like one probability slice.
        a = x[0][:, :, 0] * 0
        b = x[0][:, :, 0] * 0
        c = x[0][:, :, 0] * 0

        s1 = self.s1
        s2 = self.s2
        s3 = self.s3
        lambda_d = self.lambda_d

        # Bin-centre offsets so each stage's index range is centred on zero.
        di = s1 // 2
        dj = s2 // 2
        dk = s3 // 2

        # Final output scale; presumably maps the normalised prediction to a
        # +/-99 range (degrees for head pose) -- TODO confirm.
        V = 99

        # Stage 1: shifted expectation over bins, normalised by the
        # (lambda-adjusted) stage width.
        for i in range(0, s1):
            a = a + (i - di + x[6]) * x[0][:, :, i]
        a = a / (s1 * (1 + lambda_d * x[3]))
        # Stage 2: additionally divided by stage 1's width (finer resolution).
        for j in range(0, s2):
            b = b + (j - dj + x[7]) * x[1][:, :, j]
        b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
        # Stage 3: divided by all three stage widths (finest resolution).
        for k in range(0, s3):
            c = c + (k - dk + x[8]) * x[2][:, :, k]
        c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))

        # Sum of all stage contributions, scaled to the output range.
        pred = (a + b + c) * V

        return pred

    def compute_output_shape(self, input_shape):
        # Fixed width of 3 outputs per sample.
        return (input_shape[0], 3)

    def get_config(self):
        """Serialise constructor args so the layer survives model (de)serialisation."""
        config = {
            "s1": self.s1,
            "s2": self.s2,
            "s3": self.s3,
            "lambda_d": self.lambda_d,
        }
        base_config = super(SSRLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class FeatSliceLayer(tf.keras.layers.Layer):
    """Non-trainable layer selecting features [start_index, end_index) on the last axis."""

    def __init__(self, start_index, end_index, **kwargs):
        super().__init__(**kwargs)
        self.start_index = start_index
        self.end_index = end_index
        # Pure slicing; nothing to learn.
        self.trainable = False

    def call(self, inputs):
        lo, hi = self.start_index, self.end_index
        return inputs[:, lo:hi]

    def compute_output_shape(self, input_shape):
        width = self.end_index - self.start_index
        return (input_shape[0], width)

    def get_config(self):
        """Serialise constructor args for model (de)serialisation."""
        base = super().get_config()
        return {**base, "start_index": self.start_index, "end_index": self.end_index}
class MomentsLayer(tf.keras.layers.Layer):
    """Non-trainable layer returning the variance of its input along the last axis."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False

    def call(self, inputs):
        # Only the variance is needed; the mean from tf.nn.moments is discarded.
        _mean, variance = tf.nn.moments(inputs, axes=-1)
        return variance

    def compute_output_shape(self, input_shape):
        batch, last = input_shape[0], input_shape[-1]
        return (batch, last)
class MatrixMultiplyLayer(tf.keras.layers.Layer):
    """Non-trainable layer computing the matrix product of its two inputs."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False

    def call(self, inputs):
        left, right = inputs
        # TODO: add some asserts on the inputs; their shapes are expected to
        # already be arranged to allow the matrix multiplication.
        return tf.matmul(left, right)

    def compute_output_shape(self, input_shapes):
        left_shape, right_shape = input_shapes[0], input_shapes[1]
        return (left_shape[0], left_shape[1], right_shape[-1])
class MatrixNormLayer(tf.keras.layers.Layer):
    """Non-trainable layer that sums over the last axis and tiles the sum back out."""

    def __init__(self, tile_count, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False
        self.tile_count = tile_count

    def call(self, input):
        row_sum = K.sum(input, axis=-1, keepdims=True)
        # Broadcast each row's sum across tile_count columns.
        return K.tile(row_sum, (1, 1, self.tile_count))

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.tile_count)

    def get_config(self):
        """Serialise constructor args for model (de)serialisation."""
        base = super().get_config()
        return {**base, "tile_count": self.tile_count}
class PrimCapsLayer(tf.keras.layers.Layer):
    """Non-trainable layer forming primary capsules as (x1 @ x2) / norm."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False

    def call(self, inputs):
        left, right, norm = inputs
        return tf.matmul(left, right) / norm

    def compute_output_shape(self, input_shapes):
        # The output takes the shape of the normaliser input.
        return input_shapes[-1]
class AggregatedFeatureExtractionLayer(tf.keras.layers.Layer):
    """Non-trainable layer splitting the capsule axis into three equal stage groups.

    Axis 1 (capsules) is cut into thirds of num_capsule; each third feeds
    one downstream stage.
    """

    def __init__(self, num_capsule, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False
        self.num_capsule = num_capsule

    def call(self, input):
        third = self.num_capsule // 3
        bounds = [(0, third), (third, 2 * third), (2 * third, self.num_capsule)]
        return [input[:, lo:hi, :] for lo, hi in bounds]

    def compute_output_shape(self, input_shape):
        part = self.num_capsule // 3
        shape = (input_shape[0], part, input_shape[-1])
        return [shape, shape, shape]

    def get_config(self):
        """Serialise constructor args for model (de)serialisation."""
        base = super().get_config()
        return {**base, "num_capsule": self.num_capsule}
class BaseFSANet(object):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
self._channel_axis = 3 if K.image_data_format() == "channels_last" else 1
if self._channel_axis == 1:
logging.debug("image_dim_ordering = 'th'")
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
self.num_capsule = S_set[0]
self.dim_capsule = S_set[1]
self.routings = S_set[2]
self.num_primcaps = S_set[3]
self.m_dim = S_set[4]
self.F_shape = int(self.num_capsule / 3) * self.dim_capsule
self.map_xy_size = int(8 * image_size / 64)
self.is_fc_model = False
self.is_noS_model = False
self.is_varS_model = False
def _convBlock(self, x, num_filters, activation, kernel_size=(3, 3)):
x = tf.keras.layers.SeparableConv2D(num_filters, kernel_size, padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation(activation)(x)
return x
def ssr_G_model_build(self, img_inputs):
# -------------------------------------------------------------------------------------------------------------------------
x = self._convBlock(img_inputs, num_filters=16, activation="relu")
x_layer1 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer1, num_filters=32, activation="relu")
x = self._convBlock(x, num_filters=32, activation="relu")
x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer2, num_filters=64, activation="relu")
x = self._convBlock(x, num_filters=64, activation="relu")
x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer3, num_filters=128, activation="relu")
x_layer4 = self._convBlock(x, num_filters=128, activation="relu")
# -------------------------------------------------------------------------------------------------------------------------
s = self._convBlock(img_inputs, num_filters=16, activation="tanh")
s_layer1 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer1, num_filters=32, activation="tanh")
s = self._convBlock(s, num_filters=32, activation="tanh")
s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer2, num_filters=64, activation="tanh")
s = self._convBlock(s, num_filters=64, activation="tanh")
s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer3, num_filters=128, activation="tanh")
s_layer4 = self._convBlock(s, num_filters=128, activation="tanh")
# -------------------------------------------------------------------------------------------------------------------------
s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
# -------------------------------------------------------------------------------------------------------------------------
# Spatial Pyramid Pooling
# feat_s1_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s1_pre)
# feat_s2_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s2_pre)
# feat_s3_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s3_pre)
# feat_s1_pre = Globaltf.keras.layers.AveragePooling2D()(feat_s1_pre)
# feat_s2_pre = Globaltf.keras.layers.AveragePooling2D()(feat_s2_pre)
feat_s3_pre = tf.keras.layers.AveragePooling2D((2, 2))(
feat_s3_pre
) # make sure (8x8x64) feature maps
return tf.keras.models.Model(
inputs=img_inputs,
outputs=[feat_s1_pre, feat_s2_pre, feat_s3_pre],
name="ssr_G_model",
)
def ssr_F_model_build(self, feat_dim, name_F):
    """Build the SSR regression head that slices each stage's feature vector.

    Each of the three stage inputs (one flat vector of length ``feat_dim``)
    is split with ``FeatSliceLayer`` into three sub-ranges feeding the
    delta, local-shift, and bin-prediction branches of that stage.

    Returns a Model mapping the three stage features to nine outputs in the
    order [pred x3, delta x3, local x3].
    """
    stage_inputs = [tf.keras.layers.Input((feat_dim,)) for _ in range(3)]

    def _stage_heads(idx, bins, n_out, feats):
        # Delta branch: first quarter of the feature vector.
        d = FeatSliceLayer(0, 4)(feats)
        d = tf.keras.layers.Dense(
            n_out, activation="tanh", name=f"delta_s{idx}"
        )(d)
        # Local-shift branch: second quarter.
        loc = FeatSliceLayer(4, 8)(feats)
        loc = tf.keras.layers.Dense(
            units=n_out,
            activation="tanh",
            name=f"local_delta_stage{idx}",
        )(loc)
        # Bin-prediction branch: second half.
        p = FeatSliceLayer(8, 16)(feats)
        p = tf.keras.layers.Dense(
            bins * n_out, activation="relu"
        )(p)
        p = tf.keras.layers.Reshape((n_out, bins))(p)
        return d, loc, p

    deltas, local_shifts, preds = [], [], []
    for idx, (bins, feats) in enumerate(zip(self.stage_num, stage_inputs), start=1):
        d, loc, p = _stage_heads(idx, bins, self.num_classes, feats)
        deltas.append(d)
        local_shifts.append(loc)
        preds.append(p)

    return tf.keras.models.Model(
        inputs=stage_inputs,
        outputs=preds + deltas + local_shifts,
        name=name_F,
    )
def ssr_FC_model_build(self, feat_dim, name_F):
    """Build the fully-connected SSR regression head.

    Unlike ``ssr_F_model_build``, each branch (delta, local, prediction)
    consumes the WHOLE stage feature vector through Dense layers instead
    of disjoint slices.

    Returns a Model mapping the three stage features to nine outputs in the
    order [pred x3, delta x3, local x3].
    """
    stage_inputs = [tf.keras.layers.Input((feat_dim,)) for _ in range(3)]

    def _stage_heads(idx, bins, n_out, feats):
        # Delta branch: full feature vector -> bottleneck -> delta.
        d = tf.keras.layers.Dense(2 * n_out, activation="tanh")(
            feats
        )
        d = tf.keras.layers.Dense(
            n_out, activation="tanh", name=f"delta_s{idx}"
        )(d)
        # Local-shift branch.
        loc = tf.keras.layers.Dense(2 * n_out, activation="tanh")(
            feats
        )
        loc = tf.keras.layers.Dense(
            units=n_out,
            activation="tanh",
            name=f"local_delta_stage{idx}",
        )(loc)
        # Bin-prediction branch.
        p = tf.keras.layers.Dense(
            bins * n_out, activation="relu"
        )(feats)
        p = tf.keras.layers.Reshape((n_out, bins))(p)
        return d, loc, p

    deltas, local_shifts, preds = [], [], []
    for idx, (bins, feats) in enumerate(zip(self.stage_num, stage_inputs), start=1):
        d, loc, p = _stage_heads(idx, bins, self.num_classes, feats)
        deltas.append(d)
        local_shifts.append(loc)
        preds.append(p)

    return tf.keras.models.Model(
        inputs=stage_inputs,
        outputs=preds + deltas + local_shifts,
        name=name_F,
    )
def ssr_feat_S_model_build(self, m_dim):
    """Build the attention sub-model producing the SR matrix.

    Takes a (map_xy_size, map_xy_size, 64) feature map and emits
    [SR_matrix, flattened attention features].
    """
    grid = self.map_xy_size
    maps_in = tf.keras.layers.Input((grid, grid, 64))
    if self.is_varS_model:
        # Variance variant: summarize spatial cells by statistical moments.
        attention = MomentsLayer()(maps_in)
    else:
        # Default variant: learn a per-pixel sigmoid score with a 1x1 conv.
        attention = tf.keras.layers.Conv2D(
            1, (1, 1), padding="same", activation="sigmoid"
        )(maps_in)
    attention = tf.keras.layers.Reshape((-1,))(attention)
    sr = tf.keras.layers.Dense(
        m_dim * (grid * grid * 3), activation="sigmoid"
    )(attention)
    sr = tf.keras.layers.Reshape((m_dim, (grid * grid * 3)))(sr)
    return tf.keras.models.Model(
        inputs=maps_in, outputs=[sr, attention], name="feat_S_model"
    )
def ssr_S_model_build(self, num_primcaps, m_dim):
    """Build the S-matrix model that turns per-stage feature maps into primary capsules.

    A single shared ``feat_S_model`` scores each of the three stage inputs,
    yielding per-stage SR matrices; a shared SL matrix (learned from the
    concatenated attention features) is multiplied with each SR matrix to
    form the per-stage S matrices that mix the flattened feature maps into
    ``num_primcaps`` primary capsules of width 64.
    """
    input_s1_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
    input_s2_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
    input_s3_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
    # One shared attention sub-model applied to all three stages (shared weights).
    feat_S_model = self.ssr_feat_S_model_build(m_dim)
    SR_matrix_s1, feat_s1_preS = feat_S_model(input_s1_preS)
    SR_matrix_s2, feat_s2_preS = feat_S_model(input_s2_preS)
    SR_matrix_s3, feat_s3_preS = feat_S_model(input_s3_preS)
    feat_pre_concat = tf.keras.layers.Concatenate()(
        [feat_s1_preS, feat_s2_preS, feat_s3_preS]
    )
    # SL matrix is shared across stages; each stage contributes
    # num_primcaps/3 capsules, hence the division by 3.
    SL_matrix = tf.keras.layers.Dense(
        int(num_primcaps / 3) * m_dim, activation="sigmoid"
    )(feat_pre_concat)
    SL_matrix = tf.keras.layers.Reshape((int(num_primcaps / 3), m_dim))(SL_matrix)
    S_matrix_s1 = MatrixMultiplyLayer(name="S_matrix_s1")([SL_matrix, SR_matrix_s1])
    S_matrix_s2 = MatrixMultiplyLayer(name="S_matrix_s2")([SL_matrix, SR_matrix_s2])
    S_matrix_s3 = MatrixMultiplyLayer(name="S_matrix_s3")([SL_matrix, SR_matrix_s3])
    # Very important!!! Without this normalization, training won't converge.
    # norm_S_s1 = Lambda(lambda x: K.tile(K.sum(x,axis=-1,keepdims=True),(1,1,64)))(S_matrix_s1)
    norm_S_s1 = MatrixNormLayer(tile_count=64)(S_matrix_s1)
    norm_S_s2 = MatrixNormLayer(tile_count=64)(S_matrix_s2)
    norm_S_s3 = MatrixNormLayer(tile_count=64)(S_matrix_s3)
    # Flatten each stage's spatial grid so S matrices can mix pixels linearly.
    feat_s1_pre = tf.keras.layers.Reshape(
        (self.map_xy_size * self.map_xy_size, 64)
    )(input_s1_preS)
    feat_s2_pre = tf.keras.layers.Reshape(
        (self.map_xy_size * self.map_xy_size, 64)
    )(input_s2_preS)
    feat_s3_pre = tf.keras.layers.Reshape(
        (self.map_xy_size * self.map_xy_size, 64)
    )(input_s3_preS)
    feat_pre_concat = tf.keras.layers.Concatenate(axis=1)(
        [feat_s1_pre, feat_s2_pre, feat_s3_pre]
    )
    # Warning: don't use keras's 'K.dot'. It is very weird when high dimension is used.
    # https://github.com/keras-team/keras/issues/9779
    # Make sure 'tf.matmul' is used
    # primcaps = Lambda(lambda x: tf.matmul(x[0],x[1])/x[2])([S_matrix,feat_pre_concat, norm_S])
    primcaps_s1 = PrimCapsLayer()([S_matrix_s1, feat_pre_concat, norm_S_s1])
    primcaps_s2 = PrimCapsLayer()([S_matrix_s2, feat_pre_concat, norm_S_s2])
    primcaps_s3 = PrimCapsLayer()([S_matrix_s3, feat_pre_concat, norm_S_s3])
    primcaps = tf.keras.layers.Concatenate(axis=1)(
        [primcaps_s1, primcaps_s2, primcaps_s3]
    )
    return tf.keras.models.Model(
        inputs=[input_s1_preS, input_s2_preS, input_s3_preS],
        outputs=primcaps,
        name="ssr_S_model",
    )
def ssr_noS_model_build(self, **kwargs):
    """Build the primary-capsule model WITHOUT the learned S matrices.

    Each stage's (map_xy_size, map_xy_size, 64) feature map is simply
    flattened spatially and the three stages are concatenated; ``kwargs``
    is accepted for interface parity with ``ssr_S_model_build`` and ignored.
    The model is named "ssr_S_model" on purpose so the two variants are
    interchangeable downstream.
    """
    flat_len = self.map_xy_size * self.map_xy_size
    stage_inputs = [
        tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
        for _ in range(3)
    ]
    stage_caps = [
        tf.keras.layers.Reshape((flat_len, 64))(maps) for maps in stage_inputs
    ]
    primcaps = tf.keras.layers.Concatenate(axis=1)(stage_caps)
    return tf.keras.models.Model(
        inputs=stage_inputs,
        outputs=primcaps,
        name="ssr_S_model",
    )
def __call__(self):
    """Assemble the full FSA-Net: image -> two-stream features ->
    primary capsules -> aggregated stage features -> SSR pose head.
    """
    logging.debug("Creating model...")
    img_inputs = tf.keras.layers.Input(self._input_shape)
    # Build each sub-network up front.
    stream_model = self.ssr_G_model_build(img_inputs)
    if self.is_noS_model:
        caps_model = self.ssr_noS_model_build()
    else:
        caps_model = self.ssr_S_model_build(
            num_primcaps=self.num_primcaps, m_dim=self.m_dim
        )
    agg_model = self.ssr_aggregation_model_build((self.num_primcaps, 64))
    head_builder = (
        self.ssr_FC_model_build if self.is_fc_model else self.ssr_F_model_build
    )
    head_model = head_builder(self.F_shape, "ssr_F_Cap_model")
    # Chain the sub-networks together.
    stage_feats = stream_model(img_inputs)
    primcaps = caps_model(stage_feats)
    agg_feats = agg_model(primcaps)
    head_outputs = head_model(agg_feats)
    pred_pose = SSRLayer(
        s1=self.stage_num[0],
        s2=self.stage_num[1],
        s3=self.stage_num[2],
        lambda_d=self.lambda_d,
        name="pred_pose",
    )(head_outputs)
    return tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
# Capsule FSANetworks
class BaseCapsuleFSANet(BaseFSANet):
    """FSA-Net base whose aggregation step uses capsule dynamic routing."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)

    def ssr_aggregation_model_build(self, shape_primcaps):
        """Route primary capsules into per-stage flattened feature vectors."""
        primcaps_in = tf.keras.layers.Input(shape_primcaps)
        routed = CapsuleLayer(
            self.num_capsule, self.dim_capsule, routings=self.routings, name="caps"
        )(primcaps_in)
        per_stage = AggregatedFeatureExtractionLayer(
            num_capsule=self.num_capsule
        )(routed)
        flat_stages = [tf.keras.layers.Reshape((-1,))(feat) for feat in per_stage]
        return tf.keras.models.Model(
            inputs=primcaps_in,
            outputs=flat_stages,
            name="ssr_Cap_model",
        )
class FSA_net_Capsule(BaseCapsuleFSANet):
    """Capsule FSA-Net using the 1x1-conv (non-variance) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = False
class FSA_net_Var_Capsule(BaseCapsuleFSANet):
    """Capsule FSA-Net using the variance (moments-based) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = True
class FSA_net_noS_Capsule(BaseCapsuleFSANet):
    """Capsule FSA-Net that skips the learned S-matrix entirely."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_noS_model = True
class FSA_net_Capsule_FC(FSA_net_Capsule):
    """FSA_net_Capsule with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
class FSA_net_Var_Capsule_FC(FSA_net_Var_Capsule):
    """FSA_net_Var_Capsule with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
class FSA_net_noS_Capsule_FC(FSA_net_noS_Capsule):
    """FSA_net_noS_Capsule with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
# NetVLAD models
class BaseNetVLADFSANet(BaseFSANet):
    """FSA-Net base whose aggregation step uses NetVLAD pooling."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)

    def ssr_aggregation_model_build(self, shape_primcaps):
        """Pool primary capsules with NetVLAD into per-stage feature vectors."""
        primcaps_in = tf.keras.layers.Input(shape_primcaps)
        pooled = NetVLAD(
            feature_size=64,
            max_samples=self.num_primcaps,
            cluster_size=self.num_capsule,
            output_dim=self.num_capsule * self.dim_capsule,
        )(primcaps_in)
        # Reshape the flat NetVLAD descriptor into capsule form for the
        # shared per-stage splitter.
        pooled = tf.keras.layers.Reshape((self.num_capsule, self.dim_capsule))(
            pooled
        )
        per_stage = AggregatedFeatureExtractionLayer(
            num_capsule=self.num_capsule
        )(pooled)
        flat_stages = [tf.keras.layers.Reshape((-1,))(feat) for feat in per_stage]
        return tf.keras.models.Model(
            inputs=primcaps_in,
            outputs=flat_stages,
            name="ssr_Agg_model",
        )
class FSA_net_NetVLAD(BaseNetVLADFSANet):
    """NetVLAD FSA-Net using the 1x1-conv (non-variance) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = False
class FSA_net_Var_NetVLAD(BaseNetVLADFSANet):
    """NetVLAD FSA-Net using the variance (moments-based) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = True
class FSA_net_noS_NetVLAD(BaseNetVLADFSANet):
    """NetVLAD FSA-Net that skips the learned S-matrix entirely."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_noS_model = True
class FSA_net_NetVLAD_FC(FSA_net_NetVLAD):
    """FSA_net_NetVLAD with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
class FSA_net_Var_NetVLAD_FC(FSA_net_Var_NetVLAD):
    """FSA_net_Var_NetVLAD with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
class FSA_net_noS_NetVLAD_FC(FSA_net_noS_NetVLAD):
    """FSA_net_noS_NetVLAD with the fully-connected SSR regression head."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_fc_model = True
# Metric models
class BaseMetricFSANet(BaseFSANet):
    """FSA-Net base whose aggregation step uses learned metric matrices."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)

    def ssr_aggregation_model_build(self, shape_primcaps):
        """Aggregate primary capsules via two MatMulLayer projections."""
        primcaps_in = tf.keras.layers.Input(shape_primcaps)
        projected = MatMulLayer(16, type=1)(primcaps_in)
        projected = MatMulLayer(3, type=2)(projected)
        per_stage = AggregatedFeatureExtractionLayer(
            num_capsule=self.num_capsule
        )(projected)
        flat_stages = [tf.keras.layers.Reshape((-1,))(feat) for feat in per_stage]
        return tf.keras.models.Model(
            inputs=primcaps_in,
            outputs=flat_stages,
            name="ssr_Metric_model",
        )
class FSA_net_Metric(BaseMetricFSANet):
    """Metric FSA-Net using the 1x1-conv (non-variance) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = False
class FSA_net_Var_Metric(BaseMetricFSANet):
    """Metric FSA-Net using the variance (moments-based) scoring model."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_varS_model = True
class FSA_net_noS_Metric(BaseMetricFSANet):
    """Metric FSA-Net that skips the learned S-matrix entirely."""

    def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
        super().__init__(image_size, num_classes, stage_num, lambda_d, S_set)
        self.is_noS_model = True
class SSR_net:
    """SSR-Net: Soft Stagewise Regression network for age estimation.

    Two parallel CNN streams (ReLU/average-pool and tanh/max-pool) are
    fused multiplicatively at three depths; each fusion drives one SSR
    stage (bin probabilities plus learned bin-width delta and within-bin
    local shifts), and the stages are merged into a single scalar age.
    """

    def __init__(self, image_size, stage_num, lambda_local, lambda_d):
        # Channels-last layout; BatchNorm normalizes over the last axis.
        self._channel_axis = -1
        self._input_shape = (image_size, image_size, 3)
        # Number of bins per stage (coarse -> fine).
        self.stage_num = stage_num
        # Weight of the learned within-bin shift term in merge_age.
        self.lambda_local = lambda_local
        # Weight of the learned bin-width scaling term in merge_age.
        self.lambda_d = lambda_d

    def __call__(self):
        """Build and return the Keras model mapping an image to a scalar age."""
        logging.debug("Creating model...")
        inputs = tf.keras.layers.Input(shape=self._input_shape)
        # ----- Stream x: ReLU activations with average pooling -----
        x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer1 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer1)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer2 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer2)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer3 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer3)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        # ----- Stream s: tanh activations with max pooling -----
        s = tf.keras.layers.Conv2D(16, (3, 3))(inputs)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer1 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer1)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer2 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer2)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer3 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer3)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        # ----- Classifier block: stage 1 (deepest features) -----
        s_layer4 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s)
        s_layer4 = tf.keras.layers.Flatten()(s_layer4)
        s_layer4_mix = tf.keras.layers.Dropout(0.2)(s_layer4)
        s_layer4_mix = tf.keras.layers.Dense(
            units=self.stage_num[0], activation="relu"
        )(s_layer4_mix)
        x_layer4 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x)
        x_layer4 = tf.keras.layers.Flatten()(x_layer4)
        x_layer4_mix = tf.keras.layers.Dropout(0.2)(x_layer4)
        x_layer4_mix = tf.keras.layers.Dense(
            units=self.stage_num[0], activation="relu"
        )(x_layer4_mix)
        # Multiplicative fusion of the two streams.
        feat_a_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
        delta_s1 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s1")(
            feat_a_s1_pre
        )
        feat_a_s1 = tf.keras.layers.Multiply()([s_layer4_mix, x_layer4_mix])
        feat_a_s1 = tf.keras.layers.Dense(2 * self.stage_num[0], activation="relu")(
            feat_a_s1
        )
        pred_a_s1 = tf.keras.layers.Dense(
            units=self.stage_num[0], activation="relu", name="pred_age_stage1"
        )(feat_a_s1)
        # feat_local_s1 = Lambda(lambda x: x/10)(feat_a_s1)
        # feat_a_s1_local = Dropout(0.2)(pred_a_s1)
        local_s1 = tf.keras.layers.Dense(
            units=self.stage_num[0], activation="tanh", name="local_delta_stage1"
        )(feat_a_s1)
        # ----- Classifier block: stage 2 (mid-level features) -----
        s_layer2 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s_layer2)
        s_layer2 = tf.keras.layers.MaxPooling2D(4, 4)(s_layer2)
        s_layer2 = tf.keras.layers.Flatten()(s_layer2)
        s_layer2_mix = tf.keras.layers.Dropout(0.2)(s_layer2)
        s_layer2_mix = tf.keras.layers.Dense(self.stage_num[1], activation="relu")(
            s_layer2_mix
        )
        x_layer2 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x_layer2)
        x_layer2 = tf.keras.layers.AveragePooling2D(4, 4)(x_layer2)
        x_layer2 = tf.keras.layers.Flatten()(x_layer2)
        x_layer2_mix = tf.keras.layers.Dropout(0.2)(x_layer2)
        x_layer2_mix = tf.keras.layers.Dense(self.stage_num[1], activation="relu")(
            x_layer2_mix
        )
        feat_a_s2_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
        delta_s2 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s2")(
            feat_a_s2_pre
        )
        feat_a_s2 = tf.keras.layers.Multiply()([s_layer2_mix, x_layer2_mix])
        feat_a_s2 = tf.keras.layers.Dense(2 * self.stage_num[1], activation="relu")(
            feat_a_s2
        )
        pred_a_s2 = tf.keras.layers.Dense(
            units=self.stage_num[1], activation="relu", name="pred_age_stage2"
        )(feat_a_s2)
        # feat_local_s2 = Lambda(lambda x: x/10)(feat_a_s2)
        # feat_a_s2_local = Dropout(0.2)(pred_a_s2)
        local_s2 = tf.keras.layers.Dense(
            units=self.stage_num[1], activation="tanh", name="local_delta_stage2"
        )(feat_a_s2)
        # ----- Classifier block: stage 3 (shallowest features) -----
        s_layer1 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s_layer1)
        s_layer1 = tf.keras.layers.MaxPooling2D(8, 8)(s_layer1)
        s_layer1 = tf.keras.layers.Flatten()(s_layer1)
        s_layer1_mix = tf.keras.layers.Dropout(0.2)(s_layer1)
        s_layer1_mix = tf.keras.layers.Dense(self.stage_num[2], activation="relu")(
            s_layer1_mix
        )
        x_layer1 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x_layer1)
        x_layer1 = tf.keras.layers.AveragePooling2D(8, 8)(x_layer1)
        x_layer1 = tf.keras.layers.Flatten()(x_layer1)
        x_layer1_mix = tf.keras.layers.Dropout(0.2)(x_layer1)
        x_layer1_mix = tf.keras.layers.Dense(self.stage_num[2], activation="relu")(
            x_layer1_mix
        )
        feat_a_s3_pre = tf.keras.layers.Multiply()([s_layer1, x_layer1])
        delta_s3 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s3")(
            feat_a_s3_pre
        )
        feat_a_s3 = tf.keras.layers.Multiply()([s_layer1_mix, x_layer1_mix])
        feat_a_s3 = tf.keras.layers.Dense(2 * self.stage_num[2], activation="relu")(
            feat_a_s3
        )
        pred_a_s3 = tf.keras.layers.Dense(
            units=self.stage_num[2], activation="relu", name="pred_age_stage3"
        )(feat_a_s3)
        # feat_local_s3 = Lambda(lambda x: x/10)(feat_a_s3)
        # feat_a_s3_local = Dropout(0.2)(pred_a_s3)
        local_s3 = tf.keras.layers.Dense(
            units=self.stage_num[2], activation="tanh", name="local_delta_stage3"
        )(feat_a_s3)
        # ----- Soft stagewise regression: merge the three stages into one age -----
        def merge_age(x, s1, s2, s3, lambda_local, lambda_d):
            # x = [pred_s1, pred_s2, pred_s3, delta_s1..3, local_s1..3].
            # Each stage computes an expected bin index (shifted by the
            # lambda_local-weighted local offsets), then divides by the
            # product of the dynamically-scaled stage widths so the three
            # contributions refine the estimate coarse-to-fine.
            a = x[0][:, 0] * 0
            b = x[0][:, 0] * 0
            c = x[0][:, 0] * 0
            # A = s1 * s2 * s3
            # V scales the normalized estimate to the output range —
            # presumably 101 age classes (0..100 years); confirm with training labels.
            V = 101
            for i in range(0, s1):
                a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
            a = K.expand_dims(a, -1)
            a = a / (s1 * (1 + lambda_d * x[3]))
            for j in range(0, s2):
                b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
            b = K.expand_dims(b, -1)
            b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
            for k in range(0, s3):
                c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
            c = K.expand_dims(c, -1)
            c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
            age = (a + b + c) * V
            return age
        pred_a = tf.keras.layers.Lambda(
            merge_age,
            arguments={
                "s1": self.stage_num[0],
                "s2": self.stage_num[1],
                "s3": self.stage_num[2],
                "lambda_local": self.lambda_local,
                "lambda_d": self.lambda_d,
            },
            name="pred_a",
        )(
            [
                pred_a_s1,
                pred_a_s2,
                pred_a_s3,
                delta_s1,
                delta_s2,
                delta_s3,
                local_s1,
                local_s2,
                local_s3,
            ]
        )
        model = tf.keras.models.Model(inputs=inputs, outputs=pred_a)
        return model
class SSR_net_MT:
    """Multi-task SSR-Net variant built from separable convolutions.

    Two parallel streams (ReLU/average-pool and tanh/max-pool) of
    SeparableConv2D blocks are fused multiplicatively at three depths;
    each fusion feeds one SSR stage, and the stages are merged per class
    (e.g. one value per pose angle) by the SSR_module Lambda.
    """

    def __init__(self, image_size, num_classes, stage_num, lambda_d):
        # Channels-last layout; BatchNorm normalizes over the last axis.
        self._channel_axis = -1
        self._input_shape = (image_size, image_size, 3)
        # Number of regressed outputs (one SSR estimate per class).
        self.num_classes = num_classes
        # Number of bins per stage (coarse -> fine).
        self.stage_num = stage_num
        # Weight of the learned bin-width scaling term in SSR_module.
        self.lambda_d = lambda_d

    def __call__(self):
        """Build and return the Keras model mapping an image to num_classes values."""
        logging.debug("Creating model...")
        img_inputs = tf.keras.layers.Input(self._input_shape)
        # ----- Stream x: ReLU activations with average pooling -----
        x = tf.keras.layers.SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer1 = tf.keras.layers.AveragePooling2D((2, 2))(x)
        x = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(x_layer1)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(x)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x)
        x = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(x_layer2)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(x)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x)
        x = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(x_layer3)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(x)
        x = tf.keras.layers.BatchNormalization(axis=-1)(x)
        x_layer4 = tf.keras.layers.Activation("relu")(x)
        # ----- Stream s: tanh activations with max pooling -----
        s = tf.keras.layers.SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer1 = tf.keras.layers.MaxPooling2D((2, 2))(s)
        s = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(s_layer1)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(s)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s)
        s = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(s_layer2)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(s)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s)
        s = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(s_layer3)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(s)
        s = tf.keras.layers.BatchNormalization(axis=-1)(s)
        s_layer4 = tf.keras.layers.Activation("tanh")(s)
        # ----- Classifier block: stage 1 (deepest features) -----
        s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
        s_layer4 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer4)
        x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
        x_layer4 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer4)
        # Multiplicative fusion of the two streams.
        feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
        feat_s1_pre = tf.keras.layers.Flatten()(feat_s1_pre)
        feat_delta_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s1_pre
        )
        delta_s1 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s1"
        )(feat_delta_s1)
        feat_local_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s1_pre
        )
        local_s1 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage1"
        )(feat_local_s1)
        feat_pred_s1 = tf.keras.layers.Dense(
            self.stage_num[0] * self.num_classes, activation="relu"
        )(feat_s1_pre)
        pred_a_s1 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[0]))(
            feat_pred_s1
        )
        # ----- Classifier block: stage 2 (mid-level features) -----
        s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
        s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer3)
        x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
        x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer3)
        feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
        feat_s2_pre = tf.keras.layers.Flatten()(feat_s2_pre)
        feat_delta_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s2_pre
        )
        delta_s2 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s2"
        )(feat_delta_s2)
        feat_local_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s2_pre
        )
        local_s2 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage2"
        )(feat_local_s2)
        feat_pred_s2 = tf.keras.layers.Dense(
            self.stage_num[1] * self.num_classes, activation="relu"
        )(feat_s2_pre)
        pred_a_s2 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[1]))(
            feat_pred_s2
        )
        # ----- Classifier block: stage 3 (shallowest features) -----
        s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
        s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer2)
        x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
        x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer2)
        feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
        feat_s3_pre = tf.keras.layers.Flatten()(feat_s3_pre)
        feat_delta_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s3_pre
        )
        delta_s3 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s3"
        )(feat_delta_s3)
        feat_local_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s3_pre
        )
        local_s3 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage3"
        )(feat_local_s3)
        feat_pred_s3 = tf.keras.layers.Dense(
            self.stage_num[2] * self.num_classes, activation="relu"
        )(feat_s3_pre)
        pred_a_s3 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[2]))(
            feat_pred_s3
        )
        # ----- Soft stagewise regression: merge the three stages per class -----
        def SSR_module(x, s1, s2, s3, lambda_d):
            # x = [pred_s1, pred_s2, pred_s3, delta_s1..3, local_s1..3].
            # Bin indices are centered (i - di) so each stage's contribution
            # is a signed offset; successive stages divide by the scaled
            # stage widths, refining the estimate coarse-to-fine.
            a = x[0][:, :, 0] * 0
            b = x[0][:, :, 0] * 0
            c = x[0][:, :, 0] * 0
            di = s1 // 2
            dj = s2 // 2
            dk = s3 // 2
            # V scales the normalized estimate to the output range —
            # presumably +/-99 (e.g. pose angles in degrees); confirm with labels.
            V = 99
            # lambda_d = 0.9
            for i in range(0, s1):
                a = a + (i - di + x[6]) * x[0][:, :, i]
            # a = K.expand_dims(a,-1)
            a = a / (s1 * (1 + lambda_d * x[3]))
            for j in range(0, s2):
                b = b + (j - dj + x[7]) * x[1][:, :, j]
            # b = K.expand_dims(b,-1)
            b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
            for k in range(0, s3):
                c = c + (k - dk + x[8]) * x[2][:, :, k]
            # c = K.expand_dims(c,-1)
            c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
            pred = (a + b + c) * V
            return pred
        pred_pose = tf.keras.layers.Lambda(
            SSR_module,
            arguments={
                "s1": self.stage_num[0],
                "s2": self.stage_num[1],
                "s3": self.stage_num[2],
                "lambda_d": self.lambda_d,
            },
            name="pred_pose",
        )(
            [
                pred_a_s1,
                pred_a_s2,
                pred_a_s3,
                delta_s1,
                delta_s2,
                delta_s3,
                local_s1,
                local_s2,
                local_s3,
            ]
        )
        model = tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
        return model
class SSR_net_ori_MT:
    """SSR-Net multi-task model builder (Soft Stagewise Regression).

    Builds a two-stream CNN — a ReLU/average-pooling "x" stream and a
    tanh/max-pooling "s" stream — whose element-wise-fused features feed
    three coarse-to-fine SSR stages.  Each stage produces per-class bin
    probabilities plus learned bin-width (delta) and bin-shift (local)
    corrections, which a Lambda layer folds into one continuous output
    named "pred_pose".  Calling an instance returns the tf.keras Model.
    """

    def __init__(self, image_size, num_classes, stage_num, lambda_d):
        """Store the hyper-parameters used when building the graph.

        image_size: input is a square (image_size, image_size, 3) image.
        num_classes: number of regressed outputs (e.g. yaw/pitch/roll).
        stage_num: bins per SSR stage, indexed [stage1, stage2, stage3].
        lambda_d: strength of the learned delta bin-width adjustment.
        """
        # -1 == channels-last, matching the (H, W, 3) input shape below.
        self._channel_axis = -1
        self._input_shape = (image_size, image_size, 3)
        self.num_classes = num_classes
        self.stage_num = stage_num
        self.lambda_d = lambda_d

    def __call__(self):
        """Build and return the tf.keras Model mapping images to 'pred_pose'."""
        logging.debug("Creating model...")
        img_inputs = tf.keras.layers.Input(self._input_shape)
        # ---- stream 1 ("x"): Conv/BN/ReLU blocks with average pooling ----
        # x_layer1..x_layer4 are tapped at successive depths for the heads.
        x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(img_inputs)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer1 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer1)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer2 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer2)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x = tf.keras.layers.Activation("relu")(x)
        x_layer3 = tf.keras.layers.AveragePooling2D(2, 2)(x)
        x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer3)
        x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
        x_layer4 = tf.keras.layers.Activation("relu")(x)
        # ---- stream 2 ("s"): Conv/BN/tanh blocks with max pooling ----
        # Same depths as the x stream but 16 filters and tanh activations.
        s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(img_inputs)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer1 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer1)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer2 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer2)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s = tf.keras.layers.Activation("tanh")(s)
        s_layer3 = tf.keras.layers.MaxPooling2D(2, 2)(s)
        s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer3)
        s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
        s_layer4 = tf.keras.layers.Activation("tanh")(s)
        # ---- stage-1 head: fuse the deepest taps of both streams ----
        # Each head emits bin probabilities (pred_a_s*), a bin-width delta
        # (delta_s*) and a bin-shift correction (local_s*).
        s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
        s_layer4 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer4)
        x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
        x_layer4 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer4)
        feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
        feat_s1_pre = tf.keras.layers.Flatten()(feat_s1_pre)
        feat_delta_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s1_pre
        )
        delta_s1 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s1"
        )(feat_delta_s1)
        feat_local_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s1_pre
        )
        local_s1 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage1"
        )(feat_local_s1)
        feat_pred_s1 = tf.keras.layers.Dense(
            self.stage_num[0] * self.num_classes, activation="relu"
        )(feat_s1_pre)
        pred_a_s1 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[0]))(
            feat_pred_s1
        )
        # ---- stage-2 head: same pattern on the layer-3 taps ----
        s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
        s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer3)
        x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
        x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer3)
        feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
        feat_s2_pre = tf.keras.layers.Flatten()(feat_s2_pre)
        feat_delta_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s2_pre
        )
        delta_s2 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s2"
        )(feat_delta_s2)
        feat_local_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s2_pre
        )
        local_s2 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage2"
        )(feat_local_s2)
        feat_pred_s2 = tf.keras.layers.Dense(
            self.stage_num[1] * self.num_classes, activation="relu"
        )(feat_s2_pre)
        pred_a_s2 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[1]))(
            feat_pred_s2
        )
        # ---- stage-3 head: same pattern on the shallower layer-2 taps ----
        s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
        s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer2)
        x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
        x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer2)
        feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
        feat_s3_pre = tf.keras.layers.Flatten()(feat_s3_pre)
        feat_delta_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s3_pre
        )
        delta_s3 = tf.keras.layers.Dense(
            self.num_classes, activation="tanh", name="delta_s3"
        )(feat_delta_s3)
        feat_local_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
            feat_s3_pre
        )
        local_s3 = tf.keras.layers.Dense(
            units=self.num_classes, activation="tanh", name="local_delta_stage3"
        )(feat_local_s3)
        feat_pred_s3 = tf.keras.layers.Dense(
            self.stage_num[2] * self.num_classes, activation="relu"
        )(feat_s3_pre)
        pred_a_s3 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[2]))(
            feat_pred_s3
        )

        def SSR_module(x, s1, s2, s3, lambda_d):
            """Fold the three stage outputs into one continuous prediction.

            x packs nine tensors: x[0..2] stage probabilities,
            x[3..5] deltas, x[6..8] shifts (see the Lambda call below).
            """
            # Zero tensors of shape (batch, num_classes) as accumulators.
            a = x[0][:, :, 0] * 0
            b = x[0][:, :, 0] * 0
            c = x[0][:, :, 0] * 0
            # Centre offsets so bin indices are symmetric around zero.
            di = s1 // 2
            dj = s2 // 2
            dk = s3 // 2
            V = 99  # output scale for the fused fraction
            # Expected bin index per stage, corrected by the learned shift.
            for i in range(0, s1):
                a = a + (i - di + x[6]) * x[0][:, :, i]
            a = a / (s1 * (1 + lambda_d * x[3]))
            for j in range(0, s2):
                b = b + (j - dj + x[7]) * x[1][:, :, j]
            # Finer stages are divided by every coarser delta-adjusted width.
            b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
            for k in range(0, s3):
                c = c + (k - dk + x[8]) * x[2][:, :, k]
            c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
            pred = (a + b + c) * V
            return pred

        # Stage widths and lambda_d are baked in as Lambda arguments; the
        # nine head tensors arrive as the single list input `x`.
        pred_pose = tf.keras.layers.Lambda(
            SSR_module,
            arguments={
                "s1": self.stage_num[0],
                "s2": self.stage_num[1],
                "s3": self.stage_num[2],
                "lambda_d": self.lambda_d,
            },
            name="pred_pose",
        )(
            [
                pred_a_s1,
                pred_a_s2,
                pred_a_s3,
                delta_s1,
                delta_s2,
                delta_s3,
                local_s1,
                local_s2,
                local_s3,
            ]
        )
        model = tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
        return model
| 40.658608
| 131
| 0.568479
| 7,361
| 55,499
| 3.977585
| 0.038038
| 0.077462
| 0.138085
| 0.035657
| 0.857645
| 0.817207
| 0.779979
| 0.746883
| 0.723932
| 0.706923
| 0
| 0.036721
| 0.25514
| 55,499
| 1,364
| 132
| 40.688416
| 0.671545
| 0.083065
| 0
| 0.579792
| 0
| 0
| 0.027921
| 0.001181
| 0
| 0
| 0
| 0.000733
| 0
| 1
| 0.062323
| false
| 0
| 0.007554
| 0.00661
| 0.1322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac1d0576b9d96127b532e1ac5e9548932d7f9611
| 39
|
py
|
Python
|
pwas/__init__.py
|
cgreencode/pwas
|
e65901e115491ad9661832c7b622b01b1e81c934
|
[
"MIT"
] | 19
|
2020-06-22T02:39:25.000Z
|
2022-02-21T14:37:33.000Z
|
pwas/__init__.py
|
cgreencode/pwas
|
e65901e115491ad9661832c7b622b01b1e81c934
|
[
"MIT"
] | 5
|
2020-09-28T11:26:01.000Z
|
2021-05-06T15:34:16.000Z
|
pwas/__init__.py
|
cgreencode/pwas
|
e65901e115491ad9661832c7b622b01b1e81c934
|
[
"MIT"
] | 4
|
2020-06-25T18:19:58.000Z
|
2022-01-29T04:02:20.000Z
|
from .genotype import GenotypingManager
| 39
| 39
| 0.897436
| 4
| 39
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac3fd84e905bc1166a7d4dcb6bd2d1a33b2c8e12
| 148
|
py
|
Python
|
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | 2
|
2021-04-09T12:54:26.000Z
|
2021-04-10T07:36:22.000Z
|
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
def funwithmath(request):
    """Render the 'funwithmath.html' template.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        HttpResponse containing the rendered page.
    """
    # PEP 8 (E211): no whitespace between a callable and its argument list.
    return render(request, 'funwithmath.html')
| 24.666667
| 47
| 0.797297
| 18
| 148
| 6.555556
| 0.666667
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128378
| 148
| 6
| 47
| 24.666667
| 0.914729
| 0
| 0
| 0
| 0
| 0
| 0.107383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
3bc5e3ab47f6373dad23233f3b3391f39ba91b96
| 10,341
|
py
|
Python
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 2
|
2021-07-12T13:51:21.000Z
|
2021-07-19T08:40:02.000Z
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 41
|
2021-06-28T11:05:20.000Z
|
2022-03-13T13:48:50.000Z
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 1
|
2021-07-17T19:07:06.000Z
|
2021-07-17T19:07:06.000Z
|
"""Test Predict API calls"""
import io
from PIL import Image
from dataclasses import dataclass
import tempfile
from pathlib import Path
import pytest
from mock import patch
from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction
import responses
import requests
@pytest.fixture
def image_bytes():
    """Return the raw PNG bytes of the eight.png test fixture."""
    buffer = io.BytesIO()
    Image.open("tests/api/fixtures/eight.png", mode="r").save(buffer, format="PNG")
    return buffer.getvalue()
@dataclass
class MockResponse:
    """Lightweight stand-in for requests.Response, returned by the patched
    mldock.api.predict.execute_request in the tests below.

    Only the field relevant to the response content type is populated;
    the others keep their None defaults.
    """

    # HTTP status code seen by handle_prediction
    status_code: int
    # payload returned by .json(); used by the application/json tests
    json_data: dict = None
    # response body for the text/csv tests
    text: str = None
    # raw body for the binary image/jpeg tests (mirrors Response._content)
    _content: bytes = None

    def json(self):
        # Mimics requests.Response.json()
        return self.json_data
class TestPredictAPI:
    """Tests for mldock.api.predict.handle_prediction.

    Covers four scenarios per content type (json / jpeg / csv):
    non-200 error propagation, 200 success, writing the response to a
    file, and forwarding of extra request headers.
    """

    # ---- error scenario: handle_prediction must raise on non-200 ----
    @staticmethod
    @responses.activate
    def test_handle_prediction_send_json_handles_non_200():
        # Register a stubbed 404 for the invocation endpoint.
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
            )

    @staticmethod
    @responses.activate
    def test_handle_prediction_sending_image_jpeg_handles_non_200():
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/eight.png",
                response_file=None,
                request_content_type="image/jpeg",
                response_content_type="application/json",
            )

    @staticmethod
    @responses.activate
    def test_handle_prediction_sending_text_csv_handles_non_200():
        responses.add(
            responses.POST,
            "http://nothing-to-see-here/invocations",
            json={"error": "client error"},
            status=404,
        )
        with pytest.raises(requests.exceptions.RequestException):
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.csv",
                response_file=None,
                request_content_type="text/csv",
                response_content_type="application/json",
            )

    """
    TEST SUCCESS STATUS_CODE=200 SCENERIO
    """

    # ---- success scenario: verify url/headers/data passed downstream ----
    @staticmethod
    def test_handle_prediction_send_json_success_200():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                json_data={"result": "success"}, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "application/json"},
            }
            # Inspect the kwargs execute_request was invoked with;
            # "data" is checked separately because only its type matters.
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            assert isinstance(data_obj, str), "Failure. Expected str json object."

    @staticmethod
    def test_handle_prediction_sending_image_jpeg_success_200(image_bytes):
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                _content=image_bytes, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/eight.png",
                response_file=None,
                request_content_type="image/jpeg",
                response_content_type="image/jpeg",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "image/jpeg"},
            }
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            # Binary payloads are expected to be streamed as a buffer.
            assert isinstance(
                data_obj, io.BytesIO
            ), "Failure. Expected io.BytesIO object."

    @staticmethod
    def test_handle_prediction_sending_text_csv_success_200():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                text="greet,name\nhello,sam", status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.csv",
                response_file=None,
                request_content_type="text/csv",
                response_content_type="text/csv",
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {"Content-Type": "text/csv"},
            }
            _, kwargs = list(mock_execute_request.call_args)
            data_obj = kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
            assert isinstance(data_obj, str), "Failure. Expected str json object."

    """
    TEST WRITING RESPONSE TO FILE SCENERIO
    """

    # ---- file-output scenario: response_file should be created ----
    @staticmethod
    def test_handle_prediction_send_json_success_write_response_file():
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.json")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    json_data={"result": "success"}, status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/payload.json",
                    response_file=response_filepath,
                    request_content_type="application/json",
                    response_content_type="application/json",
                )
                assert (
                    response_filepath.is_file()
                ), "Failure. outputfile was not created"

    @staticmethod
    def test_handle_prediction_sending_image_jpeg_success_write_response_file(
        image_bytes,
    ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.png")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    _content=image_bytes, status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/eight.png",
                    response_file=response_filepath,
                    request_content_type="image/jpeg",
                    response_content_type="image/jpeg",
                )
                assert (
                    response_filepath.is_file()
                ), "Failure. outputfile was not created"

    @staticmethod
    def test_handle_prediction_sending_text_csv_success_write_response_file():
        with tempfile.TemporaryDirectory() as tmp_dir:
            response_filepath = Path(tmp_dir, "response.csv")
            with patch("mldock.api.predict.execute_request") as mock_execute_request:
                mock_execute_request.return_value = MockResponse(
                    text="greet,name\nhello,sam", status_code=200
                )
                _ = handle_prediction(
                    host="http://nothing-to-see-here/invocations",
                    request="tests/api/fixtures/payload.csv",
                    response_file=response_filepath,
                    request_content_type="text/csv",
                    response_content_type="text/csv",
                )
                assert (
                    response_filepath.is_file()
                ), "Failure. outputfile was not created"

    """
    TEST ADDING ADDTIONAL HEADERS
    """

    # ---- header-forwarding scenario: extra headers must be merged ----
    @staticmethod
    def test_handle_prediction_send_json_success_add_headers():
        with patch("mldock.api.predict.execute_request") as mock_execute_request:
            mock_execute_request.return_value = MockResponse(
                json_data={"result": "success"}, status_code=200
            )
            _ = handle_prediction(
                host="http://nothing-to-see-here/invocations",
                request="tests/api/fixtures/payload.json",
                response_file=None,
                request_content_type="application/json",
                response_content_type="application/json",
                headers={"Authentication": "bearer 12345"},
            )
            validation_kwargs = {
                "url": "http://nothing-to-see-here/invocations",
                "headers": {
                    "Content-Type": "application/json",
                    "Authentication": "bearer 12345",
                },
            }
            _, kwargs = list(mock_execute_request.call_args)
            kwargs.pop("data")
            assert (
                kwargs == validation_kwargs
            ), "Failure. URL and Headers are incorrect."
| 35.782007
| 86
| 0.579634
| 1,010
| 10,341
| 5.667327
| 0.128713
| 0.061146
| 0.056604
| 0.047519
| 0.87037
| 0.857617
| 0.856219
| 0.828616
| 0.819881
| 0.77935
| 0
| 0.009142
| 0.322986
| 10,341
| 288
| 87
| 35.90625
| 0.808456
| 0.008413
| 0
| 0.668122
| 0
| 0
| 0.218998
| 0.060215
| 0
| 0
| 0
| 0
| 0.043668
| 1
| 0.052402
| false
| 0
| 0.043668
| 0.004367
| 0.131004
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3bd9caeeddf847dd9546e4e833234ce3cce7f394
| 28
|
py
|
Python
|
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
from .make_web_app import *
| 14
| 27
| 0.785714
| 5
| 28
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.