hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e8fda128e2efa4b244ac1cf7137a78a40bc85cf6
| 18
|
py
|
Python
|
Lib/test/test_compiler/testcorpus/03_list_ex.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 1,886
|
2021-05-03T23:58:43.000Z
|
2022-03-31T19:15:58.000Z
|
Lib/test/test_compiler/testcorpus/03_list_ex.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 70
|
2021-05-04T23:25:35.000Z
|
2022-03-31T18:42:08.000Z
|
Lib/test/test_compiler/testcorpus/03_list_ex.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 52
|
2021-05-04T21:26:03.000Z
|
2022-03-08T18:02:56.000Z
|
[a, *b, *d, a, c]
| 9
| 17
| 0.277778
| 5
| 18
| 1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 18
| 1
| 18
| 18
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3302a01c98ac3067b3004d4adaf059a5754a7cb4
| 40,517
|
py
|
Python
|
covid_epidemiology/src/models/definitions/us_model_definitions_test.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
covid_epidemiology/src/models/definitions/us_model_definitions_test.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
covid_epidemiology/src/models/definitions/us_model_definitions_test.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for models.definitions.us_model_definitions."""
import unittest
import numpy as np
import pandas as pd
from covid_epidemiology.src import constants
from covid_epidemiology.src.models.definitions import us_model_definitions
class TestStateModelDefinition(unittest.TestCase):
def test_get_ts_features(self):
expected_ts_features = {
constants.DEATH:
constants.JHU_DEATH_FEATURE_KEY,
constants.CONFIRMED:
constants.JHU_CONFIRMED_FEATURE_KEY,
constants.RECOVERED_DOC:
constants.RECOVERED_FEATURE_KEY,
constants.HOSPITALIZED:
constants.HOSPITALIZED_FEATURE_KEY,
constants.HOSPITALIZED_INCREASE:
constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
constants.ICU:
constants.ICU_FEATURE_KEY,
constants.VENTILATOR:
constants.VENTILATOR_FEATURE_KEY,
constants.MOBILITY_INDEX:
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES:
constants.MOBILITY_SAMPLES,
constants.TOTAL_TESTS:
constants.TOTAL_TESTS,
constants.AMP_RESTAURANTS:
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS:
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME:
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION:
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION:
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS:
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS:
constants.AMP_FACE_MASKS,
constants.DOW_WINDOW:
constants.DOW_WINDOW,
constants.AVERAGE_TEMPERATURE:
constants.AVERAGE_TEMPERATURE,
constants.MAX_TEMPERATURE:
constants.MAX_TEMPERATURE,
constants.MIN_TEMPERATURE:
constants.MIN_TEMPERATURE,
constants.RAINFALL:
constants.RAINFALL,
constants.SNOWFALL:
constants.SNOWFALL,
constants.COMMERCIAL_SCORE:
constants.COMMERCIAL_SCORE,
constants.ANTIGEN_POSITIVE:
constants.ANTIGEN_POSITIVE,
constants.ANTIGEN_TOTAL:
constants.ANTIGEN_TOTAL,
constants.ANTIBODY_NEGATIVE:
constants.ANTIBODY_NEGATIVE,
constants.ANTIBODY_TOTAL:
constants.ANTIBODY_TOTAL,
constants.SYMPTOM_COUGH:
constants.SYMPTOM_COUGH,
constants.SYMPTOM_CHILLS:
constants.SYMPTOM_CHILLS,
constants.SYMPTOM_ANOSMIA:
constants.SYMPTOM_ANOSMIA,
constants.SYMPTOM_INFECTION:
constants.SYMPTOM_INFECTION,
constants.SYMPTOM_CHEST_PAIN:
constants.SYMPTOM_CHEST_PAIN,
constants.SYMPTOM_FEVER:
constants.SYMPTOM_FEVER,
constants.SYMPTOM_SHORTNESSBREATH:
constants.SYMPTOM_SHORTNESSBREATH,
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = state_model.get_ts_features()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_get_ts_features_to_preprocess(self):
expected_ts_features = {
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES,
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS,
constants.CONFIRMED_PER_TESTS,
constants.DEATH_PREPROCESSED,
constants.CONFIRMED_PREPROCESSED,
constants.DOW_WINDOW,
constants.TOTAL_TESTS_PER_CAPITA,
constants.TOTAL_TESTS,
constants.AVERAGE_TEMPERATURE,
constants.MAX_TEMPERATURE,
constants.MIN_TEMPERATURE,
constants.RAINFALL,
constants.SNOWFALL,
constants.COMMERCIAL_SCORE,
constants.ANTIGEN_POSITIVE_RATIO,
constants.ANTIBODY_NEGATIVE_RATIO,
constants.SYMPTOM_COUGH,
constants.SYMPTOM_CHILLS,
constants.SYMPTOM_ANOSMIA,
constants.SYMPTOM_INFECTION,
constants.SYMPTOM_CHEST_PAIN,
constants.SYMPTOM_FEVER,
constants.SYMPTOM_SHORTNESSBREATH,
constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED,
constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = state_model.get_ts_features_to_preprocess()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_extract_ts_state_features(self):
ts_data = pd.DataFrame([
{
"feature_name": constants.JHU_CONFIRMED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_CONFIRMED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_DEATH_FEATURE_KEY,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.JHU_DEATH_FEATURE_KEY,
"feature_value": float("nan"), # Not populated should ffill to 10.
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ICU_FEATURE_KEY,
"feature_value": 2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ICU_FEATURE_KEY,
"feature_value": 5,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VENTILATOR_FEATURE_KEY,
"feature_value": 50,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VENTILATOR_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.TOTAL_TESTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.TOTAL_TESTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AVERAGE_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AVERAGE_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MAX_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MAX_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MIN_TEMPERATURE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MIN_TEMPERATURE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.RAINFALL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.RAINFALL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SNOWFALL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SNOWFALL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.COMMERCIAL_SCORE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.COMMERCIAL_SCORE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_POSITIVE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_POSITIVE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_TOTAL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIGEN_TOTAL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_NEGATIVE,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_NEGATIVE,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_TOTAL,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.ANTIBODY_TOTAL,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.RECOVERED_FEATURE_KEY,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.RECOVERED_FEATURE_KEY,
"feature_value": 11,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
"feature_value": 16,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.HOSPITALIZED_INCREASE_FEATURE_KEY,
"feature_value": 14,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_COUGH,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_COUGH,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHILLS,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHILLS,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_ANOSMIA,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_ANOSMIA,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_INFECTION,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_INFECTION,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHEST_PAIN,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_CHEST_PAIN,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_FEVER,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_FEVER,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_SHORTNESSBREATH,
"feature_value": 0.6,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.SYMPTOM_SHORTNESSBREATH,
"feature_value": 0.7,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 20,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 5,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
])
static_data = pd.DataFrame([{
"feature_name": constants.AQI_MEAN,
"feature_value": 105,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.StateModelDefinition(gt_source="JHU")
static_features, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059"])
actual, _ = state_model._extract_ts_features(
ts_data=ts_data,
static_features=static_features,
locations=["4059"],
training_window_size=2)
expected = {
constants.CONFIRMED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.DEATH: {
"4059": [10, np.nan]
},
constants.DEATH_PREPROCESSED: {
"4059": [0, 0]
},
constants.ICU: {
"4059": np.array([2, 5], dtype="float32")
},
constants.INFECTED: None,
constants.HOSPITALIZED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.MOBILITY_INDEX: {
"4059": np.array([1, 0], dtype="float32")
},
constants.VENTILATOR: {
"4059": np.array([50, 100], dtype="float32")
},
constants.RECOVERED_DOC: {
"4059": np.array([11, 12], dtype="float32")
},
constants.HOSPITALIZED_INCREASE: {
"4059": np.array([14, 16], dtype="float32")
},
constants.HOSPITALIZED_CUMULATIVE: {
"4059": np.array([14, 30], dtype="float32")
},
constants.TOTAL_TESTS_PER_CAPITA: {
"4059": np.array([1, 0], dtype="float32")
},
}
for ts_feature_name in expected:
self.assertIn(ts_feature_name, actual)
np.testing.assert_equal(
actual[ts_feature_name], expected[ts_feature_name],
"Feature name {} is not aligned.".format(ts_feature_name))
def test_get_static_features(self):
expected_static_features = {
constants.POPULATION:
constants.POPULATION,
constants.INCOME_PER_CAPITA:
constants.INCOME_PER_CAPITA,
constants.POPULATION_DENSITY_PER_SQKM:
constants.POPULATION_DENSITY_PER_SQKM,
constants.HOUSEHOLD_FOOD_STAMP:
constants.HOUSEHOLD_FOOD_STAMP,
constants.KAISER_POPULATION:
constants.KAISER_POPULATION,
constants.KAISER_60P_POPULATION:
constants.KAISER_60P_POPULATION,
constants.ICU_BEDS:
constants.ICU_BEDS,
constants.HOUSEHOLDS:
constants.HOUSEHOLDS,
constants.HOSPITAL_RATING1:
constants.HOSPITAL_RATING1,
constants.HOSPITAL_RATING2:
constants.HOSPITAL_RATING2,
constants.HOSPITAL_RATING3:
constants.HOSPITAL_RATING3,
constants.HOSPITAL_RATING4:
constants.HOSPITAL_RATING4,
constants.HOSPITAL_RATING5:
constants.HOSPITAL_RATING5,
constants.AQI_MEAN:
constants.AQI_MEAN,
constants.NON_EMERGENCY_SERVICES:
constants.NON_EMERGENCY_SERVICES,
constants.EMERGENCY_SERVICES:
constants.EMERGENCY_SERVICES,
constants.HOSPITAL_ACUTE_CARE:
constants.HOSPITAL_ACUTE_CARE,
constants.CRITICAL_ACCESS_HOSPITAL:
constants.CRITICAL_ACCESS_HOSPITAL,
constants.PATIENCE_EXPERIENCE_SAME:
constants.PATIENCE_EXPERIENCE_SAME,
constants.PATIENCE_EXPERIENCE_BELOW:
constants.PATIENCE_EXPERIENCE_BELOW,
constants.PATIENCE_EXPERIENCE_ABOVE:
constants.PATIENCE_EXPERIENCE_ABOVE,
}
state_model = us_model_definitions.StateModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_static_features = state_model.get_static_features()
np.testing.assert_equal(expected_static_features, actual_static_features)
def test_extract_state_static_features(self):
static_data = pd.DataFrame([{
"feature_name": constants.AQI_MEAN,
"feature_value": 105,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.StateModelDefinition(gt_source="JHU")
actual, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059", "4058"])
expected = {
constants.AQI_MEAN: {
"4059": 0,
"4058": 0
},
constants.INCOME_PER_CAPITA: {
"4059": 0,
"4058": 1
},
constants.POPULATION: {
"4059": 70,
"4058": 50
},
constants.POPULATION_DENSITY_PER_SQKM: {
"4059": 0,
"4058": 0
},
}
for static_feature_name in expected:
self.assertEqual(actual[static_feature_name],
expected[static_feature_name])
class TestCountyModelDefinition(unittest.TestCase):
def test_get_ts_features(self):
expected_ts_features = {
constants.DEATH:
constants.JHU_COUNTY_DEATH_FEATURE_KEY,
constants.CONFIRMED:
constants.JHU_COUNTY_CONFIRMED_FEATURE_KEY,
constants.RECOVERED_DOC:
constants.CSRP_RECOVERED_FEATURE_KEY,
constants.HOSPITALIZED:
constants.CHA_HOSPITALIZED_FEATURE_KEY,
constants.HOSPITALIZED_CUMULATIVE:
constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
constants.ICU:
constants.CSRP_ICU_FEATURE_KEY,
constants.MOBILITY_INDEX:
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES:
constants.MOBILITY_SAMPLES,
constants.CSRP_TESTS:
constants.CSRP_TESTS,
constants.AMP_RESTAURANTS:
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS:
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME:
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION:
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION:
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS:
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS:
constants.AMP_FACE_MASKS,
constants.DOW_WINDOW:
constants.DOW_WINDOW,
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:
constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:
constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
}
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = county_model.get_ts_features()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_get_ts_features_to_preprocess(self):
expected_ts_features = {
constants.MOBILITY_INDEX,
constants.MOBILITY_SAMPLES,
constants.CSRP_TESTS,
constants.CONFIRMED_PER_CSRP_TESTS,
constants.TOTAL_TESTS_PER_CAPITA,
constants.AMP_RESTAURANTS,
constants.AMP_NON_ESSENTIAL_BUSINESS,
constants.AMP_STAY_AT_HOME,
constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
constants.AMP_EMERGENCY_DECLARATION,
constants.AMP_GATHERINGS,
constants.AMP_FACE_MASKS,
constants.DEATH_PREPROCESSED,
constants.CONFIRMED_PREPROCESSED,
constants.DOW_WINDOW,
constants.TOTAL_TESTS_PER_CAPITA,
constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED,
constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED,
}
county_model = us_model_definitions.CountyModelDefinition(
gt_source=constants.GT_SOURCE_JHU)
actual_ts_features = county_model.get_ts_features_to_preprocess()
np.testing.assert_equal(expected_ts_features, actual_ts_features)
def test_extract_ts_county_features(self):
ts_data = pd.DataFrame([
{
"feature_name": "confirmed_cases",
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": "confirmed_cases",
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": "deaths",
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": "deaths",
"feature_value": 13,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 0.0,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_INDEX,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.MOBILITY_SAMPLES,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_TESTS,
"feature_value": 70,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_TESTS,
"feature_value": 140,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_GATHERINGS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_EMERGENCY_DECLARATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_SCHOOLS_SECONDARY_EDUCATION,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_RESTAURANTS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_NON_ESSENTIAL_BUSINESS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_STAY_AT_HOME,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.0,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.AMP_FACE_MASKS,
"feature_value": 1.2,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.CSRP_RECOVERED_FEATURE_KEY,
"feature_value": 12,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_RECOVERED_FEATURE_KEY,
"feature_value": 11,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_FEATURE_KEY,
"feature_value": 100,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
"feature_value": 200,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY,
"feature_value": 300,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_ICU_FEATURE_KEY,
"feature_value": 20,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059",
},
{
"feature_name": constants.CSRP_ICU_FEATURE_KEY,
"feature_value": 30,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059",
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL,
"feature_value": 20,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 5,
"dt": np.datetime64("2020-01-22"),
"geo_id": "4059"
},
{
"feature_name": constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL,
"feature_value": 10,
"dt": np.datetime64("2020-01-23"),
"geo_id": "4059"
},
])
static_data = pd.DataFrame([{
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4059"
}, {
"feature_name": constants.AREA,
"feature_value": 10,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 120,
"geo_id": "4058"
}, {
"feature_name": constants.INCOME_PER_CAPITA,
"feature_value": 100,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 70,
"geo_id": "4059"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 50,
"geo_id": "4058"
}, {
"feature_name": constants.COUNTY_POPULATION,
"feature_value": 10,
"geo_id": "4057"
}])
state_model = us_model_definitions.CountyModelDefinition(
gt_source="USAFACTS")
static_features, _ = state_model._extract_static_features(
static_data=static_data, locations=["4059"])
actual, _ = state_model._extract_ts_features(
ts_data=ts_data,
static_features=static_features,
locations=["4059"],
training_window_size=2)
expected = {
constants.DEATH: {
"4059": np.array([10, 13], dtype="float32")
},
constants.CONFIRMED: {
"4059": np.array([100, 200], dtype="float32")
},
constants.MOBILITY_SAMPLES: {
"4059": np.array([0, 1], dtype="float32")
},
constants.MOBILITY_INDEX: {
"4059": np.array([0, 1], dtype="float32")
},
constants.CSRP_TESTS: {
"4059": np.array([0, 1], dtype="float32")
},
constants.RECOVERED_DOC: {
"4059": np.array([11, 12], dtype="float32"),
},
constants.HOSPITALIZED: {
"4059": np.array([100, 200], dtype="float32"),
},
constants.HOSPITALIZED_CUMULATIVE: {
"4059": np.array([200, 300], dtype="float32"),
},
constants.ICU: {
"4059": np.array([20, 30], dtype="float32"),
},
constants.TOTAL_TESTS_PER_CAPITA: {
"4059": np.array([0, 0], dtype="float32"),
},
}
for ts_feature_name in expected:
self.assertIn(ts_feature_name, actual)
np.testing.assert_equal(
actual[ts_feature_name], expected[ts_feature_name],
"Unexpected value for feature %s" % ts_feature_name)
def test_get_static_features(self):
    """The county model must expose exactly 51 static features."""
    model = us_model_definitions.CountyModelDefinition(
        gt_source=constants.GT_SOURCE_JHU)
    feature_list = model.get_static_features()
    self.assertEqual(51, len(feature_list))
def test_get_all_locations(self):
    """get_all_locations should return every geo id present in the frame."""
    geo_ids = ["4059", "4060", "4061", "4062"]
    frame = pd.DataFrame({constants.GEO_ID_COLUMN: geo_ids})
    # FIPS 15005 (Kalawao County, no longer exists) is excluded upstream.
    model = us_model_definitions.CountyModelDefinition(
        gt_source=constants.GT_SOURCE_JHU)
    np.testing.assert_equal(set(geo_ids), model.get_all_locations(frame))
def test_extract_county_static_features(self):
    """_extract_static_features ranks income and passes population through."""
    rows = [
        (constants.AREA, 10, "4059"),
        (constants.AREA, 10, "4058"),
        (constants.INCOME_PER_CAPITA, 120, "4058"),
        (constants.INCOME_PER_CAPITA, 100, "4059"),
        (constants.COUNTY_POPULATION, 70, "4059"),
        (constants.COUNTY_POPULATION, 50, "4058"),
        (constants.COUNTY_POPULATION, 10, "4057"),
    ]
    static_data = pd.DataFrame(
        [{"feature_name": name, "feature_value": value, "geo_id": geo}
         for name, value, geo in rows])
    model = us_model_definitions.CountyModelDefinition(gt_source="JHU")
    actual, _ = model._extract_static_features(
        static_data=static_data, locations=["4059", "4058"])
    expected = {
        constants.INCOME_PER_CAPITA: {
            "4059": 0,
            "4058": 1
        },
        constants.POPULATION: {
            "4059": 70,
            "4058": 50
        }
    }
    for feature_name, feature_values in expected.items():
        self.assertEqual(actual[feature_name], feature_values,
                         "Unexpected value for feature %s" % feature_name)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 33.02119
| 79
| 0.549991
| 4,041
| 40,517
| 5.195744
| 0.069042
| 0.08173
| 0.127643
| 0.091446
| 0.904744
| 0.860497
| 0.831539
| 0.800819
| 0.768194
| 0.750762
| 0
| 0.083872
| 0.327591
| 40,517
| 1,226
| 80
| 33.048124
| 0.686793
| 0.017696
| 0
| 0.655905
| 0
| 0
| 0.166369
| 0
| 0
| 0
| 0
| 0
| 0.011045
| 1
| 0.009346
| false
| 0
| 0.004248
| 0
| 0.015293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
331428924c4ed8d0a00e88d52d47d78f60ddad84
| 18,059
|
py
|
Python
|
sdk/python/pulumi_rancher2/auth_config_free_ipa.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_rancher2/auth_config_free_ipa.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_rancher2/auth_config_free_ipa.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class AuthConfigFreeIpa(pulumi.CustomResource):
    # NOTE: tfgen-generated resource wrapper ("do not edit by hand" per the
    # file header); property names/defaults mirror the Terraform provider
    # schema for rancher2 FreeIPA auth config.
    access_mode: pulumi.Output[str]
    """
    Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
    """
    allowed_principal_ids: pulumi.Output[list]
    """
    Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `freeipa_user://<DN>` `freeipa_group://<DN>` (list)
    """
    annotations: pulumi.Output[dict]
    """
    Annotations of the resource (map)
    """
    certificate: pulumi.Output[str]
    """
    Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
    """
    connection_timeout: pulumi.Output[float]
    """
    FreeIpa connection timeout. Default `5000` (int)
    """
    enabled: pulumi.Output[bool]
    """
    Enable auth config provider. Default `true` (bool)
    """
    group_dn_attribute: pulumi.Output[str]
    """
    Group DN attribute. Default `entryDN` (string)
    """
    group_member_mapping_attribute: pulumi.Output[str]
    """
    Group member mapping attribute. Default `member` (string)
    """
    group_member_user_attribute: pulumi.Output[str]
    """
    Group member user attribute. Default `entryDN` (string)
    """
    group_name_attribute: pulumi.Output[str]
    """
    Group name attribute. Default `cn` (string)
    """
    group_object_class: pulumi.Output[str]
    """
    Group object class. Default `groupOfNames` (string)
    """
    group_search_attribute: pulumi.Output[str]
    """
    Group search attribute. Default `cn` (string)
    """
    group_search_base: pulumi.Output[str]
    """
    Group search base (string)
    """
    labels: pulumi.Output[dict]
    """
    Labels of the resource (map)
    """
    name: pulumi.Output[str]
    """
    (Computed) The name of the resource (string)
    """
    nested_group_membership_enabled: pulumi.Output[bool]
    """
    Nested group membership enable. Default `false` (bool)
    """
    port: pulumi.Output[float]
    """
    FreeIpa port. Default `389` (int)
    """
    servers: pulumi.Output[list]
    """
    FreeIpa servers list (list)
    """
    service_account_distinguished_name: pulumi.Output[str]
    """
    Service account DN for access FreeIpa service (string)
    """
    service_account_password: pulumi.Output[str]
    """
    Service account password for access FreeIpa service (string)
    """
    tls: pulumi.Output[bool]
    """
    Enable TLS connection (bool)
    """
    type: pulumi.Output[str]
    """
    (Computed) The type of the resource (string)
    """
    user_disabled_bit_mask: pulumi.Output[float]
    """
    User disabled bit mask (int)
    """
    user_enabled_attribute: pulumi.Output[str]
    """
    User enable attribute (string)
    """
    user_login_attribute: pulumi.Output[str]
    """
    User login attribute. Default `uid` (string)
    """
    user_member_attribute: pulumi.Output[str]
    """
    User member attribute. Default `memberOf` (string)
    """
    user_name_attribute: pulumi.Output[str]
    """
    User name attribute. Default `givenName` (string)
    """
    user_object_class: pulumi.Output[str]
    """
    User object class. Default `inetorgperson` (string)
    """
    user_search_attribute: pulumi.Output[str]
    """
    User search attribute. Default `uid|sn|givenName` (string)
    """
    user_search_base: pulumi.Output[str]
    """
    User search base DN (string)
    """
    def __init__(__self__, resource_name, opts=None, access_mode=None, allowed_principal_ids=None, annotations=None, certificate=None, connection_timeout=None, enabled=None, group_dn_attribute=None, group_member_mapping_attribute=None, group_member_user_attribute=None, group_name_attribute=None, group_object_class=None, group_search_attribute=None, group_search_base=None, labels=None, nested_group_membership_enabled=None, port=None, servers=None, service_account_distinguished_name=None, service_account_password=None, tls=None, user_disabled_bit_mask=None, user_enabled_attribute=None, user_login_attribute=None, user_member_attribute=None, user_name_attribute=None, user_object_class=None, user_search_attribute=None, user_search_base=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a Rancher v2 Auth Config FreeIpa resource. This can be used to configure and enable Auth Config FreeIpa for Rancher v2 RKE clusters and retrieve their information.
        In addition to the built-in local auth, only one external auth config provider can be enabled at a time.
        > This content is derived from https://github.com/terraform-providers/terraform-provider-rancher2/blob/master/website/docs/r/authConfigFreeIpa.html.markdown.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        :param pulumi.Input[list] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `freeipa_user://<DN>` `freeipa_group://<DN>` (list)
        :param pulumi.Input[dict] annotations: Annotations of the resource (map)
        :param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        :param pulumi.Input[float] connection_timeout: FreeIpa connection timeout. Default `5000` (int)
        :param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
        :param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
        :param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
        :param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
        :param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
        :param pulumi.Input[str] group_search_base: Group search base (string)
        :param pulumi.Input[dict] labels: Labels of the resource (map)
        :param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
        :param pulumi.Input[float] port: FreeIpa port. Default `389` (int)
        :param pulumi.Input[list] servers: FreeIpa servers list (list)
        :param pulumi.Input[str] service_account_distinguished_name: Service account DN for access FreeIpa service (string)
        :param pulumi.Input[str] service_account_password: Service account password for access FreeIpa service (string)
        :param pulumi.Input[bool] tls: Enable TLS connection (bool)
        :param pulumi.Input[float] user_disabled_bit_mask: User disabled bit mask (int)
        :param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
        :param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
        :param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
        :param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
        :param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
        :param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
        :param pulumi.Input[str] user_search_base: User search base DN (string)
        """
        # Legacy __name__/__opts__ keyword support, kept for backward
        # compatibility with older generated SDKs.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means a new resource is being created, so the property
        # bag is built from the constructor arguments; with an id set,
        # __props__ must come from get() instead.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['access_mode'] = access_mode
            __props__['allowed_principal_ids'] = allowed_principal_ids
            __props__['annotations'] = annotations
            __props__['certificate'] = certificate
            __props__['connection_timeout'] = connection_timeout
            __props__['enabled'] = enabled
            __props__['group_dn_attribute'] = group_dn_attribute
            __props__['group_member_mapping_attribute'] = group_member_mapping_attribute
            __props__['group_member_user_attribute'] = group_member_user_attribute
            __props__['group_name_attribute'] = group_name_attribute
            __props__['group_object_class'] = group_object_class
            __props__['group_search_attribute'] = group_search_attribute
            __props__['group_search_base'] = group_search_base
            __props__['labels'] = labels
            __props__['nested_group_membership_enabled'] = nested_group_membership_enabled
            __props__['port'] = port
            # Required provider-schema properties: fail fast when missing.
            if servers is None:
                raise TypeError("Missing required property 'servers'")
            __props__['servers'] = servers
            if service_account_distinguished_name is None:
                raise TypeError("Missing required property 'service_account_distinguished_name'")
            __props__['service_account_distinguished_name'] = service_account_distinguished_name
            if service_account_password is None:
                raise TypeError("Missing required property 'service_account_password'")
            __props__['service_account_password'] = service_account_password
            __props__['tls'] = tls
            __props__['user_disabled_bit_mask'] = user_disabled_bit_mask
            __props__['user_enabled_attribute'] = user_enabled_attribute
            __props__['user_login_attribute'] = user_login_attribute
            __props__['user_member_attribute'] = user_member_attribute
            __props__['user_name_attribute'] = user_name_attribute
            __props__['user_object_class'] = user_object_class
            __props__['user_search_attribute'] = user_search_attribute
            if user_search_base is None:
                raise TypeError("Missing required property 'user_search_base'")
            __props__['user_search_base'] = user_search_base
            # Computed outputs are filled in by the engine, not the caller.
            __props__['name'] = None
            __props__['type'] = None
        super(AuthConfigFreeIpa, __self__).__init__(
            'rancher2:index/authConfigFreeIpa:AuthConfigFreeIpa',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, access_mode=None, allowed_principal_ids=None, annotations=None, certificate=None, connection_timeout=None, enabled=None, group_dn_attribute=None, group_member_mapping_attribute=None, group_member_user_attribute=None, group_name_attribute=None, group_object_class=None, group_search_attribute=None, group_search_base=None, labels=None, name=None, nested_group_membership_enabled=None, port=None, servers=None, service_account_distinguished_name=None, service_account_password=None, tls=None, type=None, user_disabled_bit_mask=None, user_enabled_attribute=None, user_login_attribute=None, user_member_attribute=None, user_name_attribute=None, user_object_class=None, user_search_attribute=None, user_search_base=None):
        """
        Get an existing AuthConfigFreeIpa resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        :param pulumi.Input[list] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `freeipa_user://<DN>` `freeipa_group://<DN>` (list)
        :param pulumi.Input[dict] annotations: Annotations of the resource (map)
        :param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        :param pulumi.Input[float] connection_timeout: FreeIpa connection timeout. Default `5000` (int)
        :param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
        :param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
        :param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
        :param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
        :param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
        :param pulumi.Input[str] group_search_base: Group search base (string)
        :param pulumi.Input[dict] labels: Labels of the resource (map)
        :param pulumi.Input[str] name: (Computed) The name of the resource (string)
        :param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
        :param pulumi.Input[float] port: FreeIpa port. Default `389` (int)
        :param pulumi.Input[list] servers: FreeIpa servers list (list)
        :param pulumi.Input[str] service_account_distinguished_name: Service account DN for access FreeIpa service (string)
        :param pulumi.Input[str] service_account_password: Service account password for access FreeIpa service (string)
        :param pulumi.Input[bool] tls: Enable TLS connection (bool)
        :param pulumi.Input[str] type: (Computed) The type of the resource (string)
        :param pulumi.Input[float] user_disabled_bit_mask: User disabled bit mask (int)
        :param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
        :param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
        :param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
        :param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
        :param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
        :param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
        :param pulumi.Input[str] user_search_base: User search base DN (string)
        """
        # Attach the provider id so the engine looks up existing state
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["access_mode"] = access_mode
        __props__["allowed_principal_ids"] = allowed_principal_ids
        __props__["annotations"] = annotations
        __props__["certificate"] = certificate
        __props__["connection_timeout"] = connection_timeout
        __props__["enabled"] = enabled
        __props__["group_dn_attribute"] = group_dn_attribute
        __props__["group_member_mapping_attribute"] = group_member_mapping_attribute
        __props__["group_member_user_attribute"] = group_member_user_attribute
        __props__["group_name_attribute"] = group_name_attribute
        __props__["group_object_class"] = group_object_class
        __props__["group_search_attribute"] = group_search_attribute
        __props__["group_search_base"] = group_search_base
        __props__["labels"] = labels
        __props__["name"] = name
        __props__["nested_group_membership_enabled"] = nested_group_membership_enabled
        __props__["port"] = port
        __props__["servers"] = servers
        __props__["service_account_distinguished_name"] = service_account_distinguished_name
        __props__["service_account_password"] = service_account_password
        __props__["tls"] = tls
        __props__["type"] = type
        __props__["user_disabled_bit_mask"] = user_disabled_bit_mask
        __props__["user_enabled_attribute"] = user_enabled_attribute
        __props__["user_login_attribute"] = user_login_attribute
        __props__["user_member_attribute"] = user_member_attribute
        __props__["user_name_attribute"] = user_name_attribute
        __props__["user_object_class"] = user_object_class
        __props__["user_search_attribute"] = user_search_attribute
        __props__["user_search_base"] = user_search_base
        return AuthConfigFreeIpa(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase output names back to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case input names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 57.512739
| 793
| 0.710726
| 2,157
| 18,059
| 5.593417
| 0.097821
| 0.054704
| 0.076917
| 0.059843
| 0.804393
| 0.750269
| 0.714712
| 0.675176
| 0.657522
| 0.636966
| 0
| 0.002617
| 0.195803
| 18,059
| 313
| 794
| 57.696486
| 0.828135
| 0.369345
| 0
| 0.014706
| 1
| 0
| 0.171169
| 0.075142
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0.058824
| 0.044118
| 0.014706
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
332881ff5598bbae0500a31fe196a9fb57551da4
| 122
|
py
|
Python
|
tests/check_profile.py
|
justengel/pybk8500
|
6a9748033c783a0081ec391359067dfb9dc83760
|
[
"MIT"
] | null | null | null |
tests/check_profile.py
|
justengel/pybk8500
|
6a9748033c783a0081ec391359067dfb9dc83760
|
[
"MIT"
] | null | null | null |
tests/check_profile.py
|
justengel/pybk8500
|
6a9748033c783a0081ec391359067dfb9dc83760
|
[
"MIT"
] | null | null | null |
from pybk8500.run_profile import main

# Equivalent CLI invocation:
# python -m pybk8500.run_profile "./check_profile.csv"

if __name__ == '__main__':
    # Guarded so importing this module does not trigger the profile run;
    # behavior is unchanged when executed as a script.
    main('./check_profile.csv')
| 24.4
| 54
| 0.778689
| 18
| 122
| 5.055556
| 0.555556
| 0.241758
| 0.395604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 0.090164
| 122
| 4
| 55
| 30.5
| 0.747748
| 0.42623
| 0
| 0
| 0
| 0
| 0.279412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3355b9121c87a85325bb532b9bb103d7d5260216
| 21
|
py
|
Python
|
kn3/__init__.py
|
zodman/kn3
|
11cc69196069e1fda723fc896e17ea79901ff6c2
|
[
"BSD-3-Clause"
] | null | null | null |
kn3/__init__.py
|
zodman/kn3
|
11cc69196069e1fda723fc896e17ea79901ff6c2
|
[
"BSD-3-Clause"
] | null | null | null |
kn3/__init__.py
|
zodman/kn3
|
11cc69196069e1fda723fc896e17ea79901ff6c2
|
[
"BSD-3-Clause"
] | null | null | null |
from .kn3 import Kn3
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0.190476
| 21
| 1
| 21
| 21
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
33560daf1e366b58a3eb75f066c5c7c9297acc0e
| 25
|
py
|
Python
|
Templates/FuncApp-Http-sql-Example/tools/__init__.py
|
mmaysami/azure-functions-python
|
e97b29204af83bc1fc81b886f841fe7b7bc0c8a3
|
[
"MIT"
] | null | null | null |
Templates/FuncApp-Http-sql-Example/tools/__init__.py
|
mmaysami/azure-functions-python
|
e97b29204af83bc1fc81b886f841fe7b7bc0c8a3
|
[
"MIT"
] | null | null | null |
Templates/FuncApp-Http-sql-Example/tools/__init__.py
|
mmaysami/azure-functions-python
|
e97b29204af83bc1fc81b886f841fe7b7bc0c8a3
|
[
"MIT"
] | null | null | null |
from .tools_math import *
| 25
| 25
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
336416883cf2fdc18bbc73b2302d4c934a8dec43
| 39,499
|
py
|
Python
|
tensorflow_version/net/generator.py
|
TijmenKort/blindinpainting_vcnet
|
d1a5467a6beec16450a6ef304ab26c88f4293cb5
|
[
"MIT"
] | 40
|
2020-07-31T06:16:37.000Z
|
2022-03-14T12:55:54.000Z
|
tensorflow_version/net/generator.py
|
TijmenKort/blindinpainting_vcnet
|
d1a5467a6beec16450a6ef304ab26c88f4293cb5
|
[
"MIT"
] | 6
|
2020-07-23T20:52:36.000Z
|
2021-05-20T01:38:58.000Z
|
tensorflow_version/net/generator.py
|
TijmenKort/blindinpainting_vcnet
|
d1a5467a6beec16450a6ef304ab26c88f4293cb5
|
[
"MIT"
] | 7
|
2020-09-14T14:08:08.000Z
|
2022-03-11T14:53:25.000Z
|
import tensorflow as tf
from net.ops import *
from net.loss import *
from util.util import f2uint
from functools import partial, reduce
from abc import abstractmethod, ABC as AbstractBaseClass
from tensorflow.contrib.framework.python.ops import arg_scope
class BaseNetwork(AbstractBaseClass):
def __init__(self, config=None):
self.config = config
self.net = partial(self.build_net, config=config)
@abstractmethod
def build_net(self, x, mask, config=None, reuse=False, training=True, name='blind_inpaint_net'):
pass
@abstractmethod
def evaluate(self, im, noise, mask, config, reuse=False):
pass
def forward(self, x, mask, reuse=False):
return self.net(x=x, mask=mask, reuse=reuse, training=True, name=self.config.name)
class VCNModel(BaseNetwork):
    """VCN blind-inpainting generator.

    ``build_net`` builds a mask-prediction branch plus a context-normalized
    completion branch and returns (completed image, predicted mask, mask
    logits).

    Fix: ``dummy_use`` previously called ``self.build_generator``, a method
    that does not exist on this class or on BaseNetwork (both fully defined
    in this file); it now calls ``build_net`` like ``evaluate``/``de_fence``.
    """

    def __init__(self, config=None):
        super(VCNModel, self).__init__(config=config)

    def build_net(self, x, mask=None, reuse=False, name='blind_inpaint_net', config=None):
        """Build the two-branch VCN graph.

        Args:
            x: input image tensor; assumed NHWC — TODO confirm.
            mask: optional corruption mask tensor (1 = corrupted).
            reuse: reuse variables in the named scope.
            name: variable-scope name for all created variables.
            config: run configuration (rho, g_cnum, cn_type, use_cn, phase,
                embrace).

        Returns:
            (x, mask_pred, mask_logit): completed image clipped to [-1, 1],
            predicted mask clipped to [0, 1], and the raw mask logits.
        """
        xshape = x.get_shape().as_list()
        xh, xw = xshape[1], xshape[2]
        xin = x
        rho = config.rho
        # network with three branches
        cnum = self.config.g_cnum
        cn_type = self.config.cn_type
        conv_3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')
        # NOTE(review): this write-back is a no-op (rho was just read from
        # config.rho); kept verbatim for behavioral parity.
        if rho is not None:
            config.rho = rho
        with tf.variable_scope(name, reuse=reuse):
            # branch mask: predicts where the corruption is.
            x = resblock(xin, cnum*2, 5, stride=2, name='mask_conv2')
            x = resblock(x, cnum*4, 3, stride=2, name='mask_conv3')
            x = resblock(x, cnum * 4, 3, stride=1, rate=2, name='mask_conv4_atrous')
            mx_feat = resblock(x, cnum * 4, 3, stride=1, rate=4, name='mask_conv5_atrous')
            xb3 = tf.image.resize_bilinear(mx_feat, [xh, xw], align_corners=True)
            x = conv_3(inputs=x, filters=cnum * 4, strides=1, name='mask_conv8')
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            x = resblock(x, cnum * 2, 3, stride=1, name='mask_deconv9')
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            x = resblock(x, cnum, 3, stride=1, name='mask_deconv10')
            x = conv_3(inputs=x, filters=cnum // 2, strides=1, name='mask_compress_conv')
            mask_logit = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None, padding='SAME',
                                          name='mask_output')
            mask_pred = tf.clip_by_value(mask_logit, 0., 1.)
            # Choose the mask fed to context normalization; when CN is off
            # the mask input is discarded (NOTE(review): confirm upstream).
            if config.use_cn is True:
                if config.phase == 'tune':
                    mask = mask_pred
            else:
                mask = None
            if config.embrace is True:
                xin = xin * (1 - mask)
            # branch completion: context-normalized encoder/decoder.
            x = context_resblock(xin, mask, cnum, 5, stride=1, name='cmp_conv1', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum*2, 3, stride=2, name='cmp_conv2', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 2, 3, stride=1, name='cmp_conv21', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=2, name='cmp_conv3', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=1, name='cmp_conv31', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum*4, 3, stride=1, rate=2, name='cmp_conv4_atrous', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=1, rate=2, name='cmp_conv5_atrous', alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=1, rate=4, name='cmp_conv6_atrous', alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=1, rate=4, name='cmp_conv7_atrous', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 4, 3, stride=1, name='cmp_conv8', debug=cn_type, alpha=config.rho)
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            x = context_resblock(x, mask, cnum * 2, 3, stride=1, name='cmp_deconv9', debug=cn_type, alpha=config.rho)
            x = context_resblock(x, mask, cnum * 2, 3, stride=1, name='cmp_deconv91', debug=cn_type, alpha=config.rho)
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            x = context_resblock(x, mask, cnum, 3, stride=1, name='cmp_deconv10', debug=cn_type, alpha=config.rho)
            xb1 = context_resblock(x, mask, cnum, 3, stride=1, name='cmp_deconv101', debug=cn_type, alpha=config.rho)
            # Fuse the completion branch with upsampled mask-branch features.
            x = tf.concat([xb1, xb3], axis=-1)
            x = conv_3(inputs=x, filters=cnum, strides=1, name='cmp_compress_conv1')
            x = conv_3(inputs=x, filters=cnum//2, strides=1, name='cmp_compress_conv2')
            x = tf.layers.conv2d(inputs=x, kernel_size=3, filters=3, strides=1, activation=None, padding='SAME',
                                 name='cmp_output')
            x = tf.clip_by_value(x, -1., 1.)
        return x, mask_pred, mask_logit

    def evaluate(self, im, noise, mask, config, reuse=False):
        """Corrupt *im* with *noise* under *mask*, inpaint, and score the
        predicted mask against the ground-truth mask (BCE)."""
        # generate mask, 1 represents masked point
        self.config = config
        im = im / 127.5 - 1
        noise = noise / 127.5 - 1
        if config.use_blend is True:
            mask_soft = priority_loss_mask(1 - mask, hsize=15, iters=4) + mask
            im = im * (1 - mask_soft) + noise * mask_soft
        else:
            im = im * (1 - mask) + noise * mask
        batch_input = im
        # inpaint
        batch_predict, batch_mask, batch_mask_logit = self.build_net(im, reuse=reuse, config=config)
        # apply mask and reconstruct
        batch_complete = batch_predict * batch_mask + im * (1 - batch_mask)
        bce = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=batch_mask_logit))
        return batch_predict, batch_complete, batch_mask, bce, batch_input

    def de_fence(self, im, mask, config, reuse=False):
        """Inpaint *im* with a known mask (phase 'acc'), e.g. fence removal."""
        # generate mask, 1 represents masked point
        self.config = config
        im = im / 127.5 - 1
        batch_input = im
        # inpaint
        self.config.phase = 'acc'
        batch_predict, batch_mask, batch_mask_logit = self.build_net(im, mask=mask, reuse=reuse, config=self.config)
        # apply mask and reconstruct
        batch_complete = batch_predict * batch_mask + im * (1 - batch_mask)
        bce = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=batch_mask_logit))
        return batch_predict, batch_complete, batch_mask, bce, batch_input

    def dummy_use(self, im, mask, config, reuse=False):
        """Inpaint *im* after zeroing the masked region; no BCE reported."""
        # generate mask, 1 represents masked point
        self.config = config
        im = im / 127.5 - 1
        im = im * (1 - mask)
        # inpaint
        # Fixed: was self.build_generator (undefined); build_net is the
        # builder used by evaluate()/de_fence().
        batch_predict, batch_mask, batch_mask_logit = self.build_net(im, reuse=reuse, config=config)
        # apply mask and reconstruct
        batch_complete = batch_predict * batch_mask + im * (1 - batch_mask)
        # batch_complete = batch_predict
        return batch_predict, batch_complete, batch_mask
class InpaintCAModel_MEN(BaseNetwork):
def __init__(self, config=None):
super(InpaintCAModel_MEN, self).__init__(config)
def build_net(self, x, mask, config=None, reuse=False, training=True, name='blind_inpaint_net'):
xin = x
x_one = tf.ones_like(x)[:, :, :, 0:1]
xshape = x.get_shape().as_list()
xh, xw = xshape[1], xshape[2]
# network with three branches
if config is None:
cnum = self.config.g_cnum
else:
cnum = config.g_cnum
conv_3 = self.conv3
padding='SAME'
with tf.variable_scope(name, reuse=reuse):
# branch mask
x = resblock(xin, cnum*2, 5, stride=2, name='mask_conv2')
x = resblock(x, cnum*4, 3, stride=2, name='mask_conv3')
x = resblock(x, cnum * 4, 3, stride=1, rate=2, name='mask_conv4_atrous')
mx_feat = resblock(x, cnum * 4, 3, stride=1, rate=4, name='mask_conv5_atrous')
x = conv_3(inputs=x, filters=cnum * 4, strides=1, name='mask_conv8')
x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
x = resblock(x, cnum * 2, 3, stride=1, name='mask_deconv9')
x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
x = resblock(x, cnum, 3, stride=1, name='mask_deconv10')
x = conv_3(inputs=x, filters=cnum // 2, strides=1, name='mask_compress_conv')
mask_logit = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None, padding='SAME',
name='mask_output')
mask_pred = tf.clip_by_value(mask_logit, 0., 1.)
if config.phase == 'tune':
mask = mask_pred
if config.embrace is True:
xin = xin * (1 - mask)
with tf.variable_scope(name, reuse=reuse), \
arg_scope([gen_conv, gen_deconv],
training=training, padding=padding):
x = tf.concat([xin, mask * x_one], axis=-1)
# stage1
x = gen_conv(x, cnum, 5, 1, name='conv1')
x = gen_conv(x, 2*cnum, 3, 2, name='conv2_downsample')
x = gen_conv(x, 2*cnum, 3, 1, name='conv3')
x = gen_conv(x, 4*cnum, 3, 2, name='conv4_downsample')
x = gen_conv(x, 4*cnum, 3, 1, name='conv5')
x = gen_conv(x, 4*cnum, 3, 1, name='conv6')
x = gen_conv(x, 4*cnum, 3, rate=2, name='conv7_atrous')
x = gen_conv(x, 4*cnum, 3, rate=4, name='conv8_atrous')
x = gen_conv(x, 4*cnum, 3, rate=8, name='conv9_atrous')
x = gen_conv(x, 4*cnum, 3, rate=16, name='conv10_atrous')
x = gen_conv(x, 4*cnum, 3, 1, name='conv11')
x = gen_conv(x, 4*cnum, 3, 1, name='conv12')
x = gen_deconv(x, 2*cnum, name='conv13_upsample')
x = gen_conv(x, 2*cnum, 3, 1, name='conv14')
x = gen_deconv(x, cnum, name='conv15_upsample')
x = gen_conv(x, cnum//2, 3, 1, name='conv16')
x = gen_conv(x, 3, 3, 1, activation=None, name='conv17')
x = tf.clip_by_value(x, -1., 1.)
x_stage1 = x
x = x*mask + xin*(1.-mask)
ones_x = tf.ones_like(x, dtype=tf.float32)[:, :, :, 0:1]
xnow = tf.concat([x, ones_x*mask], axis=3)
x = gen_conv(xnow, cnum, 5, 1, name='xconv1')
x = gen_conv(x, cnum, 3, 2, name='xconv2_downsample')
x = gen_conv(x, 2*cnum, 3, 1, name='xconv3')
x = gen_conv(x, 2*cnum, 3, 2, name='xconv4_downsample')
x = gen_conv(x, 4*cnum, 3, 1, name='xconv5')
x = gen_conv(x, 4*cnum, 3, 1, name='xconv6')
x = gen_conv(x, 4*cnum, 3, rate=2, name='xconv7_atrous')
x = gen_conv(x, 4*cnum, 3, rate=4, name='xconv8_atrous')
x = gen_conv(x, 4*cnum, 3, rate=8, name='xconv9_atrous')
x = gen_conv(x, 4*cnum, 3, rate=16, name='xconv10_atrous')
x_hallu = x
# attention branch
x = gen_conv(xnow, cnum, 5, 1, name='pmconv1')
x = gen_conv(x, cnum, 3, 2, name='pmconv2_downsample')
x = gen_conv(x, 2*cnum, 3, 1, name='pmconv3')
x = gen_conv(x, 4*cnum, 3, 2, name='pmconv4_downsample')
x = gen_conv(x, 4*cnum, 3, 1, name='pmconv5')
x = gen_conv(x, 4*cnum, 3, 1, name='pmconv6',
activation=tf.nn.relu)
mask_s = resize_mask_like(mask, x)[0:1, :, :, :]
x, offset_flow = contextual_attention(x, x, mask_s, 3, 1, rate=2)
x = gen_conv(x, 4*cnum, 3, 1, name='pmconv9')
x = gen_conv(x, 4*cnum, 3, 1, name='pmconv10')
pm = x
x = tf.concat([x_hallu, pm], axis=3)
x = gen_conv(x, 4*cnum, 3, 1, name='allconv11')
x = gen_conv(x, 4*cnum, 3, 1, name='allconv12')
x = gen_deconv(x, 2*cnum, name='allconv13_upsample')
x = gen_conv(x, 2*cnum, 3, 1, name='allconv14')
x = gen_deconv(x, cnum, name='allconv15_upsample')
x = gen_conv(x, cnum//2, 3, 1, name='allconv16')
x = gen_conv(x, 3, 3, 1, activation=None, name='allconv17')
x_stage2 = tf.clip_by_value(x, -1., 1.)
return x_stage1, x_stage2, offset_flow, mask_pred, mask_logit
def evaluate(self, batch_data, batch_noise, masks, config=None, reuse=False, is_training=False):
    """Build the inference graph: corrupt the input with noise under the mask, inpaint it.

    In the mask, 1 marks a corrupted/masked pixel.

    Returns:
        (stage2 prediction, blended completion, predicted mask, mask BCE loss, corrupted input)
    """
    # Rescale image and noise from [0, 255] into [-1, 1].
    clean = batch_data / 127.5 - 1.
    batch_noise = batch_noise / 127.5 - 1
    if config.use_blend is True:
        # Feather the mask border so the noise blends smoothly into the image.
        blend_mask = priority_loss_mask(1 - masks, hsize=15, iters=4) + masks
    else:
        blend_mask = masks
    # Composite: noise inside the (possibly softened) mask, clean image outside.
    im = clean * (1 - blend_mask) + batch_noise * blend_mask
    # Inpaint; stage1 output is unused at inference time.
    stage1, stage2, flow, mask_pred, mask_logit = self.build_net(
        im, masks, reuse=reuse, training=is_training, config=config)
    # Keep the network's output only where it predicts corruption; original elsewhere.
    batch_complete = stage2 * mask_pred + im * (1 - mask_pred)
    # Supervise the mask-prediction branch against the ground-truth mask.
    bce = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=masks, logits=mask_logit))
    return stage2, batch_complete, mask_pred, bce, im
class NaiveED(BaseNetwork):
    """Naive two-stage encoder-decoder inpainting generator.

    No mask-prediction branch and no contextual attention: stage 2 simply refines
    the raw stage-1 output (the stage-1 result is NOT pasted back into the input).
    """

    def __init__(self, config=None):
        super(NaiveED, self).__init__(config)

    def build_net(self, x, mask, config=None, reuse=False, training=True, name='blind_inpaint_net'):
        """Build the two-stage generator graph.

        Args:
            x: input image tensor in [-1, 1], NHWC.
            mask: unused here; kept so the signature matches the sibling models.
            config: unused here; kept for API parity with sibling models.
            reuse: reuse variables of an existing scope.
            training: passed to gen_conv/gen_deconv (e.g. for norm layers).
            name: variable scope name.

        Returns:
            (x_stage1, x_stage2), both clipped to [-1, 1].
        """
        cnum = 32
        # BUGFIX: `padding` was referenced in arg_scope below but never defined in
        # this method (sibling InpaintGatedModel_MEN.build_net defines it); without
        # it this relied on an unseen module-level global or raised NameError.
        padding = 'SAME'
        with tf.variable_scope(name, reuse=reuse), \
                arg_scope([gen_conv, gen_deconv],
                          training=training, padding=padding):
            # stage 1: encoder -> dilated bottleneck -> decoder
            x = gen_conv(x, cnum, 5, 1, name='conv1')
            x = gen_conv(x, 2*cnum, 3, 2, name='conv2_downsample')
            x = gen_conv(x, 2*cnum, 3, 1, name='conv3')
            x = gen_conv(x, 4*cnum, 3, 2, name='conv4_downsample')
            x = gen_conv(x, 4*cnum, 3, 1, name='conv5')
            x = gen_conv(x, 4*cnum, 3, 1, name='conv6')
            x = gen_conv(x, 4*cnum, 3, rate=2, name='conv7_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=4, name='conv8_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=8, name='conv9_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=16, name='conv10_atrous')
            x = gen_conv(x, 4*cnum, 3, 1, name='conv11')
            x = gen_conv(x, 4*cnum, 3, 1, name='conv12')
            x = gen_deconv(x, 2*cnum, name='conv13_upsample')
            x = gen_conv(x, 2*cnum, 3, 1, name='conv14')
            x = gen_deconv(x, cnum, name='conv15_upsample')
            x = gen_conv(x, cnum//2, 3, 1, name='conv16')
            x = gen_conv(x, 3, 3, 1, activation=None, name='conv17')
            x = tf.clip_by_value(x, -1., 1.)
            x_stage1 = x
            # stage 2: same topology, fed the raw stage-1 output (no mask pasting).
            xnow = x_stage1
            x = gen_conv(xnow, cnum, 5, 1, name='xconv1')
            x = gen_conv(x, cnum, 3, 2, name='xconv2_downsample')
            x = gen_conv(x, 2*cnum, 3, 1, name='xconv3')
            x = gen_conv(x, 2*cnum, 3, 2, name='xconv4_downsample')
            x = gen_conv(x, 4*cnum, 3, 1, name='xconv5')
            x = gen_conv(x, 4*cnum, 3, 1, name='xconv6')
            x = gen_conv(x, 4*cnum, 3, rate=2, name='xconv7_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=4, name='xconv8_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=8, name='xconv9_atrous')
            x = gen_conv(x, 4*cnum, 3, rate=16, name='xconv10_atrous')
            x = gen_conv(x, 4*cnum, 3, 1, name='allconv11')
            x = gen_conv(x, 4*cnum, 3, 1, name='allconv12')
            x = gen_deconv(x, 2*cnum, name='allconv13_upsample')
            x = gen_conv(x, 2*cnum, 3, 1, name='allconv14')
            x = gen_deconv(x, cnum, name='allconv15_upsample')
            x = gen_conv(x, cnum//2, 3, 1, name='allconv16')
            x = gen_conv(x, 3, 3, 1, activation=None, name='allconv17')
            x_stage2 = tf.clip_by_value(x, -1., 1.)
        return x_stage1, x_stage2

    def evaluate(self, batch_data, noise, mask, config=None, reuse=False, is_training=False):
        """Inference graph. In `mask`, 1 marks a masked (corrupted) pixel.

        Returns (prediction, completion, None, None, corrupted input); the two
        `None`s keep the return arity consistent with sibling models that also
        return a predicted mask and its BCE loss.
        """
        # Rescale inputs from [0, 255] into [-1, 1].
        im = batch_data / 127.5 - 1.
        noise = noise / 127.5 - 1
        if config.use_blend is True:
            # Feather the mask border so the noise blends smoothly.
            mask_soft = priority_loss_mask(1 - mask, hsize=15, iters=4) + mask
            im = im * (1 - mask_soft) + noise * mask_soft
        else:
            im = im * (1 - mask) + noise * mask
        x1, x2 = self.build_net(im, mask, reuse=reuse, training=is_training, config=config)
        batch_predict = x2
        # No predicted mask here, so the completion is just the raw prediction.
        batch_complete = batch_predict
        return batch_predict, batch_complete, None, None, im
class GMCNNModel_MEN(BaseNetwork):
    """GMCNN-style multi-branch inpainting generator with a mask-prediction branch.

    Three generator branches with different kernel sizes (7/5/3) are merged into
    the final prediction; a separate residual branch predicts the corruption mask.
    """

    def __init__(self, config=None):
        super(GMCNNModel_MEN, self).__init__(config)

    def build_net(self, x, mask, config=None, reuse=False, training=True, name='blind_inpaint_net'):
        """Build the graph.

        Returns:
            (prediction in [-1, 1], mask_pred in [0, 1], raw mask_logit).
        """
        xshape = x.get_shape().as_list()
        xh, xw = xshape[1], xshape[2]
        if config is not None:
            self.config = config
        # network with three branches
        cnum = self.config.g_cnum
        b_names = ['b1', 'b2', 'b3', 'merge']
        # Conv layers sharing everything but kernel size; one per branch.
        conv_7 = partial(tf.layers.conv2d, kernel_size=7, activation=tf.nn.elu, padding='SAME')
        conv_5 = partial(tf.layers.conv2d, kernel_size=5, activation=tf.nn.elu, padding='SAME')
        conv_3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')
        with tf.variable_scope(name, reuse=reuse):
            # Mask-prediction branch: downsample x4, dilated resblock, upsample back.
            x = resblock(x, cnum*2, 5, stride=2, name='mask_conv2')
            x = resblock(x, cnum*4, 3, stride=2, name='mask_conv3')
            x = resblock(x, cnum * 4, 3, stride=1, rate=2, name='mask_conv4_atrous')
            x = conv_3(inputs=x, filters=cnum * 4, strides=1, name='mask_conv8')
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            x = resblock(x, cnum * 2, 3, stride=1, name='mask_deconv9')
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            x = resblock(x, cnum, 3, stride=1, name='mask_deconv10')
            x = conv_3(inputs=x, filters=cnum // 2, strides=1, name='mask_compress_conv')
            mask_logit = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None, padding='SAME',
                                          name='mask_output')
            # NOTE: mask_pred is the clipped logit, not a sigmoid — values in (0, 1)
            # are raw logit magnitudes, not probabilities.
            mask_pred = tf.clip_by_value(mask_logit, 0., 1.)
        # In tune phase, trust the predicted mask instead of the supplied one.
        if config.phase == 'tune':
            mask = mask_pred
        if config.embrace is True:
            # NOTE(review): at this point `x` holds mask-branch features (cnum//2
            # channels), not the original image — the input was overwritten above.
            # Sibling models (PartialConvNet, InpaintGatedModel_MEN) keep `xin` and
            # mask the original input here; confirm whether this is intentional.
            x = x * (1 - mask)
        ones_x = tf.ones_like(x)[:, :, :, 0:1]
        # Concatenate the mask as an extra channel for the generator branches.
        x_w_mask = tf.concat([x, ones_x * mask], axis=3)
        with tf.variable_scope(name, reuse=reuse):
            # Branch 1: 7x7 kernels; stays at 1/4 resolution, bilinearly upsampled x4.
            x = conv_7(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[0] + 'conv1')
            x = conv_7(inputs=x, filters=2*cnum, strides=2, name=b_names[0] + 'conv2_downsample')
            x = conv_7(inputs=x, filters=2*cnum, strides=1, name=b_names[0] + 'conv3')
            x = conv_7(inputs=x, filters=4*cnum, strides=2, name=b_names[0] + 'conv4_downsample')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv5')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv6')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=2, name=b_names[0] + 'conv7_atrous')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=4, name=b_names[0] + 'conv8_atrous')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=8, name=b_names[0] + 'conv9_atrous')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=16, name=b_names[0] + 'conv10_atrous')
            if cnum > 32:
                # Extra dilation level for larger-capacity configurations.
                x = conv_7(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[0] + 'conv11_atrous')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv11')
            x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv12')
            x_b1 = tf.image.resize_bilinear(x, [xh, xw], align_corners=True)
            # Branch 2: 5x5 kernels; decodes to 1/2 resolution, then upsampled x2.
            x = conv_5(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[1] + 'conv1')
            x = conv_5(inputs=x, filters=2 * cnum, strides=2, name=b_names[1] + 'conv2_downsample')
            x = conv_5(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv3')
            x = conv_5(inputs=x, filters=4 * cnum, strides=2, name=b_names[1] + 'conv4_downsample')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv5')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv6')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=2, name=b_names[1] + 'conv7_atrous')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=4, name=b_names[1] + 'conv8_atrous')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=8, name=b_names[1] + 'conv9_atrous')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=16, name=b_names[1] + 'conv10_atrous')
            if cnum > 32:
                x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[1] + 'conv11_atrous')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv11')
            x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv12')
            x = tf.image.resize_nearest_neighbor(x, [xh//2, xw//2], align_corners=True)
            with tf.variable_scope(b_names[1] + 'conv13_upsample'):
                x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv13_upsample_conv')
            x = conv_5(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv14')
            x_b2 = tf.image.resize_bilinear(x, [xh, xw], align_corners=True)
            # Branch 3: 3x3 kernels; fully decoded back to input resolution.
            x = conv_5(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[2] + 'conv1')
            x = conv_3(inputs=x, filters=2 * cnum, strides=2, name=b_names[2] + 'conv2_downsample')
            x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv3')
            x = conv_3(inputs=x, filters=4 * cnum, strides=2, name=b_names[2] + 'conv4_downsample')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv5')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv6')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=2, name=b_names[2] + 'conv7_atrous')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=4, name=b_names[2] + 'conv8_atrous')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=8, name=b_names[2] + 'conv9_atrous')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=16, name=b_names[2] + 'conv10_atrous')
            if cnum > 32:
                x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[2] + 'conv11_atrous')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv11')
            x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv12')
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            with tf.variable_scope(b_names[2] + 'conv13_upsample'):
                x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv13_upsample_conv')
            x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv14')
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            with tf.variable_scope(b_names[2] + 'conv15_upsample'):
                x = conv_3(inputs=x, filters=cnum, strides=1, name=b_names[2] + 'conv15_upsample_conv')
            x_b3 = conv_3(inputs=x, filters=cnum//2, strides=1, name=b_names[2] + 'conv16')
            # Merge the three branches and project to RGB.
            x_merge = tf.concat([x_b1, x_b2, x_b3], axis=3)
            x = conv_3(inputs=x_merge, filters=cnum // 2, strides=1, name=b_names[3] + 'conv17')
            x = tf.layers.conv2d(inputs=x, kernel_size=3, filters=3, strides=1, activation=None, padding='SAME',
                                 name=b_names[3] + 'conv18')
            x = tf.clip_by_value(x, -1., 1.)
        return x, mask_pred, mask_logit

    def evaluate(self, im, noise, mask, config, reuse=False):
        """Inference graph. In `mask`, 1 marks a masked (corrupted) pixel.

        Returns (prediction, completion, predicted mask, mask BCE loss, corrupted input).
        """
        self.config = config
        # Rescale inputs from [0, 255] into [-1, 1].
        im = im / 127.5 - 1
        noise = noise / 127.5 - 1
        if config.use_blend is True:
            # Feather the mask border so the noise blends smoothly.
            mask_soft = priority_loss_mask(1 - mask, hsize=15, iters=4) + mask
            im = im * (1 - mask_soft) + noise * mask_soft
        else:
            im = im * (1 - mask) + noise * mask
        # inpaint
        batch_predict, mask_pred, mask_logit = self.build_net(im, mask, config=config, reuse=reuse)
        # Composite with the *predicted* mask: network output where corruption is predicted.
        batch_complete = batch_predict * mask_pred + im * (1 - mask_pred)
        bce = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=mask_logit))
        return batch_predict, batch_complete, mask_pred, bce, im
class PartialConvNet(BaseNetwork):
    """Partial-convolution U-Net inpainter with a mask-prediction branch.

    The mask branch predicts the corruption mask; the completion branch is a
    7-level encoder/decoder of partial convolutions with skip connections on
    both features and masks.
    """

    def __init__(self, config=None):
        super(PartialConvNet, self).__init__(config)

    def build_net(self, x, mask=None, reuse=False, name='blind_inpaint_net', config=None):
        """Build the graph.

        Args:
            x: input image tensor in [-1, 1], NHWC.
            mask: corruption mask (1 = masked); if None, the predicted mask is used.
            reuse: reuse variables of an existing scope.
            name: variable scope name.
            config: config object (phase / embrace flags, g_cnum).

        Returns:
            (prediction in [-1, 1], mask_pred in [0, 1], raw mask_logit).
        """
        xshape = x.get_shape().as_list()
        xh, xw = xshape[1], xshape[2]
        xin = x
        cnum = self.config.g_cnum
        conv_3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')
        with tf.variable_scope(name, reuse=reuse):
            # Mask-prediction branch: downsample x4, dilated resblocks, upsample back.
            x = resblock(xin, cnum*2, 5, stride=2, name='mask_conv2')
            x = resblock(x, cnum*4, 3, stride=2, name='mask_conv3')
            x = resblock(x, cnum * 4, 3, stride=1, rate=2, name='mask_conv4_atrous')
            mx_feat = resblock(x, cnum * 4, 3, stride=1, rate=4, name='mask_conv5_atrous')
            # NOTE(review): the next two layers share the name 'mask_conv8'; if both
            # create variables under the same scope this collides. Kept as-is to stay
            # checkpoint-compatible — confirm against trained weights before renaming.
            x = resblock(mx_feat, cnum * 4, 3, stride=1, name='mask_conv8')
            x = conv_3(inputs=x, filters=cnum * 4, strides=1, name='mask_conv8')
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            x = resblock(x, cnum * 2, 3, stride=1, name='mask_deconv9')
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            x = resblock(x, cnum, 3, stride=1, name='mask_deconv10')
            x = conv_3(inputs=x, filters=cnum // 2, strides=1, name='mask_compress_conv')
            mask_logit = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None,
                                          padding='SAME', name='mask_output')
            # NOTE: clipped logit, not a sigmoid — not a calibrated probability.
            mask_pred = tf.clip_by_value(mask_logit, 0., 1.)
            # Use the predicted mask when tuning or when no mask was supplied.
            if config.phase == 'tune' or mask is None:
                mask = mask_pred
            if config.embrace is True:
                # Zero out the (presumed corrupted) masked region of the input.
                xin = xin * (1 - mask)
            xin_ch = xin.get_shape().as_list()[-1]
            # Partial-conv validity mask: 1 = valid pixel, broadcast to input channels.
            # Renamed from `min`, which shadowed the builtin.
            m = 1 - tf.tile(mask, [1, 1, 1, xin_ch])
            m_in = m
            # Encoder: 7 partial-conv downsamplings; keep features/masks for skips.
            x1, m1 = partial_conv(xin, m, cnum * 2, 7, stride=2, activation=tf.nn.relu, name='cmp_pconv1')
            x2, m2 = partial_conv(x1, m1, cnum * 4, 5, stride=2, activation=tf.nn.relu, name='cmp_pconv2')
            x3, m3 = partial_conv(x2, m2, cnum * 8, 5, stride=2, activation=tf.nn.relu, name='cmp_pconv3')
            x4, m4 = partial_conv(x3, m3, cnum * 16, 3, stride=2, activation=tf.nn.relu, name='cmp_pconv4')
            x5, m5 = partial_conv(x4, m4, cnum * 16, 3, stride=2, activation=tf.nn.relu, name='cmp_pconv5')
            x6, m6 = partial_conv(x5, m5, cnum * 16, 3, stride=2, activation=tf.nn.relu, name='cmp_pconv6')
            x, m = partial_conv(x6, m6, cnum * 16, 3, stride=2, activation=tf.nn.relu, name='cmp_pconv7')
            # Decoder: NN-upsample both features and masks, concat skip, partial conv.
            h, w = x.get_shape().as_list()[1:3]
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x6], -1), tf.concat([m, m6], -1), cnum * 16, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv1')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x5], -1), tf.concat([m, m5], -1), cnum * 16, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv2')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x4], -1), tf.concat([m, m4], -1), cnum * 16, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv3')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x3], -1), tf.concat([m, m3], -1), cnum * 8, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv4')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x2], -1), tf.concat([m, m2], -1), cnum * 4, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv5')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            x, m = partial_conv(tf.concat([x, x1], -1), tf.concat([m, m1], -1), cnum * 2, 3, stride=1,
                                activation=tf.nn.leaky_relu, name='cmp_pdconv6')
            h, w = h * 2, w * 2
            x = tf.image.resize_nearest_neighbor(x, [h, w])
            m = tf.image.resize_nearest_neighbor(m, [h, w])
            # Final level concatenates the original input and initial validity mask.
            x, _ = partial_conv(tf.concat([x, xin], -1), tf.concat([m, m_in], -1), 3, 3, stride=1,
                                activation=None, name='cmp_pdconv7')
            x = tf.clip_by_value(x, -1., 1.)
        return x, mask_pred, mask_logit

    def evaluate(self, im, noise, mask, config, reuse=False):
        """Inference graph. In `mask`, 1 marks a masked (corrupted) pixel.

        Returns (prediction, completion, predicted mask, mask BCE loss, corrupted input).
        """
        self.config = config
        # Rescale inputs from [0, 255] into [-1, 1].
        im = im / 127.5 - 1
        noise = noise / 127.5 - 1
        if config.use_blend is True:
            # Feather the mask border so the noise blends smoothly.
            mask_soft = priority_loss_mask(1 - mask, hsize=15, iters=4) + mask
            im = im * (1 - mask_soft) + noise * mask_soft
        else:
            im = im * (1 - mask) + noise * mask
        # inpaint
        batch_predict, mask_pred, mask_logit = self.build_net(im, mask, reuse=reuse, config=config)
        # NOTE(review): composites with the ground-truth `mask`, unlike sibling
        # models which use `mask_pred` — confirm this asymmetry is intentional.
        batch_complete = batch_predict * mask + im * (1 - mask)
        bce = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=mask_logit))
        return batch_predict, batch_complete, mask_pred, bce, im
class InpaintGatedModel_MEN(BaseNetwork):
    """Gated-convolution two-stage inpainter (DeepFill-v2 style) with a mask branch.

    Stage 1 coarsely fills the image; stage 2 refines it with a parallel
    contextual-attention branch. A separate residual branch predicts the
    corruption mask. Also provides an SN-PatchGAN discriminator.
    """

    def __init__(self, config=None):
        super(InpaintGatedModel_MEN, self).__init__(config)

    def build_net(self, x, mask, config=None, reuse=False, training=True, name='blind_inpaint_net'):
        """Build the generator graph.

        Returns:
            (x_stage1, x_stage2, offset_flow, mask_pred, mask_logit).
        """
        xin = x
        ones_x = tf.ones_like(x)[:, :, :, 0:1]
        xshape = x.get_shape().as_list()
        xh, xw = xshape[1], xshape[2]
        padding = 'SAME'
        if config is None:
            cnum = self.config.g_cnum
        else:
            cnum = config.g_cnum
        conv_3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')
        with tf.variable_scope(name, reuse=reuse):
            # Mask-prediction branch: downsample x4, dilated resblock, upsample back.
            x = resblock(xin, cnum * 2, 5, stride=2, name='mask_conv2')
            x = resblock(x, cnum * 4, 3, stride=2, name='mask_conv3')
            x = resblock(x, cnum * 4, 3, stride=1, rate=2, name='mask_conv4_atrous')
            x = conv_3(inputs=x, filters=cnum * 4, strides=1, name='mask_conv8')
            x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)
            x = resblock(x, cnum * 2, 3, stride=1, name='mask_deconv9')
            x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)
            x = resblock(x, cnum, 3, stride=1, name='mask_deconv10')
            x = conv_3(inputs=x, filters=cnum // 2, strides=1, name='mask_compress_conv')
            mask_logit = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None,
                                          padding='SAME',
                                          name='mask_output')
            # NOTE: clipped logit, not a sigmoid — not a calibrated probability.
            mask_pred = tf.clip_by_value(mask_logit, 0., 1.)
        # In tune phase, trust the predicted mask instead of the supplied one.
        if config.phase == 'tune':
            mask = mask_pred
        if config.embrace is True:
            # Zero out the (presumed corrupted) masked region of the input.
            xin = xin * (1 - mask)
        # Fixed channel base for the two-stage generator (overrides g_cnum).
        cnum = 48
        # Input: image + constant-ones channel + mask channel.
        x = tf.concat([xin, ones_x, ones_x * mask], axis=3)
        with tf.variable_scope(name, reuse=reuse), \
                arg_scope([gen_conv, gen_deconv],
                          training=training, padding=padding):
            # Stage 1: gated-conv encoder -> dilated bottleneck -> decoder.
            x = gen_gatedconv(x, cnum, 5, 1, name='conv1')
            x = gen_gatedconv(x, 2*cnum, 3, 2, name='conv2_downsample')
            x = gen_gatedconv(x, 2*cnum, 3, 1, name='conv3')
            x = gen_gatedconv(x, 4*cnum, 3, 2, name='conv4_downsample')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='conv5')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='conv6')
            # Downsampled mask, reused later by the contextual-attention branch.
            mask_s = resize_mask_like(mask, x)
            x = gen_gatedconv(x, 4*cnum, 3, rate=2, name='conv7_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=4, name='conv8_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=8, name='conv9_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=16, name='conv10_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='conv11')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='conv12')
            x = gen_degatedconv(x, 2*cnum, name='conv13_upsample')
            x = gen_gatedconv(x, 2*cnum, 3, 1, name='conv14')
            x = gen_degatedconv(x, cnum, name='conv15_upsample')
            x = gen_gatedconv(x, cnum//2, 3, 1, name='conv16')
            x = gen_gatedconv(x, 3, 3, 1, activation=None, name='conv17')
            x = tf.nn.tanh(x)
            x_stage1 = x
            # Stage 2: paste stage-1 result into the (first 3 channels of the) input.
            x = x*mask + xin[:, :, :, 0:3]*(1.-mask)
            x.set_shape(xin[:, :, :, 0:3].get_shape().as_list())
            xnow = x
            # Hallucination (plain gated-conv) branch.
            x = gen_gatedconv(xnow, cnum, 5, 1, name='xconv1')
            x = gen_gatedconv(x, cnum, 3, 2, name='xconv2_downsample')
            x = gen_gatedconv(x, 2*cnum, 3, 1, name='xconv3')
            x = gen_gatedconv(x, 2*cnum, 3, 2, name='xconv4_downsample')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='xconv5')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='xconv6')
            x = gen_gatedconv(x, 4*cnum, 3, rate=2, name='xconv7_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=4, name='xconv8_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=8, name='xconv9_atrous')
            x = gen_gatedconv(x, 4*cnum, 3, rate=16, name='xconv10_atrous')
            x_hallu = x
            # attention branch
            x = gen_gatedconv(xnow, cnum, 5, 1, name='pmconv1')
            x = gen_gatedconv(x, cnum, 3, 2, name='pmconv2_downsample')
            x = gen_gatedconv(x, 2*cnum, 3, 1, name='pmconv3')
            x = gen_gatedconv(x, 4*cnum, 3, 2, name='pmconv4_downsample')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='pmconv5')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='pmconv6', activation=tf.nn.relu)
            # Collapse the batch dimension of the mask to a single map for attention.
            # (keep_dims is the TF1 spelling of keepdims.)
            mask_s = tf.reduce_mean(mask_s, axis=0, keep_dims=True)
            x, offset_flow = contextual_attention(x, x, mask_s, 3, 1, rate=2)
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='pmconv9')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='pmconv10')
            pm = x
            # Fuse the two branches and decode to RGB.
            x = tf.concat([x_hallu, pm], axis=3)
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='allconv11')
            x = gen_gatedconv(x, 4*cnum, 3, 1, name='allconv12')
            x = gen_degatedconv(x, 2*cnum, name='allconv13_upsample')
            x = gen_gatedconv(x, 2*cnum, 3, 1, name='allconv14')
            x = gen_degatedconv(x, cnum, name='allconv15_upsample')
            x = gen_gatedconv(x, cnum//2, 3, 1, name='allconv16')
            x = gen_gatedconv(x, 3, 3, 1, activation=None, name='allconv17')
            x = tf.nn.tanh(x)
            x_stage2 = x
        return x_stage1, x_stage2, offset_flow, mask_pred, mask_logit

    def build_sn_patch_gan_discriminator(self, x, reuse=False, training=True):
        """SN-PatchGAN discriminator: six spectral-norm convs, flattened output."""
        with tf.variable_scope('sn_patch_gan', reuse=reuse):
            cnum = 64
            x = dis_spectralconv(x, cnum, name='conv1', training=training)
            x = dis_spectralconv(x, cnum*2, name='conv2', training=training)
            x = dis_spectralconv(x, cnum*4, name='conv3', training=training)
            x = dis_spectralconv(x, cnum*4, name='conv4', training=training)
            x = dis_spectralconv(x, cnum*4, name='conv5', training=training)
            x = dis_spectralconv(x, cnum*4, name='conv6', training=training)
            x = flatten(x, name='flatten')
            return x

    def build_gan_discriminator(
            self, batch, reuse=False, training=True):
        """Wrap the SN-PatchGAN discriminator in the standard 'discriminator' scope."""
        with tf.variable_scope('discriminator', reuse=reuse):
            d = self.build_sn_patch_gan_discriminator(
                batch, reuse=reuse, training=training)
            return d

    def evaluate(self, batch_data, noise, mask, config=None, reuse=False, is_training=False):
        """Inference graph. In `mask`, 1 marks a masked (corrupted) pixel.

        Returns (prediction, completion, predicted mask, mask BCE loss, corrupted input).
        """
        # Rescale inputs from [0, 255] into [-1, 1].
        im = batch_data / 127.5 - 1.
        noise = noise / 127.5 - 1
        if config.use_blend is True:
            # Feather the mask border so the noise blends smoothly.
            mask_soft = priority_loss_mask(1-mask, hsize=15, iters=4)+mask
            im = im * (1 - mask_soft) + noise * mask_soft
        else:
            im = im * (1 - mask) + noise * mask
        x1, x2, flow, mask_pred, mask_logit = self.build_net(im, mask, config=config, reuse=reuse, training=False)
        batch_predict = x2
        # Composite with the *predicted* mask.
        batch_complete = batch_predict * mask_pred + im * (1 - mask_pred)
        bce = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=mask_logit))
        return batch_predict, batch_complete, mask_pred, bce, im
| 53.018792
| 130
| 0.575458
| 5,852
| 39,499
| 3.708476
| 0.050239
| 0.021749
| 0.025067
| 0.026956
| 0.889918
| 0.872316
| 0.860059
| 0.831029
| 0.804995
| 0.75629
| 0
| 0.051422
| 0.283146
| 39,499
| 744
| 131
| 53.090054
| 0.715027
| 0.027823
| 0
| 0.580372
| 0
| 0
| 0.076516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043993
| false
| 0.003384
| 0.011844
| 0.001692
| 0.096447
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6847bed2af0e51e2757b804411e0db73023f67d1
| 112
|
py
|
Python
|
internal/devel/lib/python2.7/dist-packages/pose_graph_msgs/msg/__init__.py
|
rishabhraaj17/blam_updates
|
a7fff0d29d99d51d02128af56d504c242e4cdfa9
|
[
"BSD-3-Clause"
] | null | null | null |
internal/devel/lib/python2.7/dist-packages/pose_graph_msgs/msg/__init__.py
|
rishabhraaj17/blam_updates
|
a7fff0d29d99d51d02128af56d504c242e4cdfa9
|
[
"BSD-3-Clause"
] | null | null | null |
internal/devel/lib/python2.7/dist-packages/pose_graph_msgs/msg/__init__.py
|
rishabhraaj17/blam_updates
|
a7fff0d29d99d51d02128af56d504c242e4cdfa9
|
[
"BSD-3-Clause"
] | null | null | null |
from ._KeyedScan import *
from ._PoseGraph import *
from ._PoseGraphEdge import *
from ._PoseGraphNode import *
| 22.4
| 29
| 0.785714
| 12
| 112
| 7
| 0.5
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 112
| 4
| 30
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6863c550eaec3bc0ea1d7a84d8bb3e36970a474b
| 108
|
py
|
Python
|
model/__init__.py
|
Cppowboy/StaticHyperNetwork
|
63c9cc17d1ebf9809129e736bbfddf1bf0374fdd
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
Cppowboy/StaticHyperNetwork
|
63c9cc17d1ebf9809129e736bbfddf1bf0374fdd
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
Cppowboy/StaticHyperNetwork
|
63c9cc17d1ebf9809129e736bbfddf1bf0374fdd
|
[
"Apache-2.0"
] | null | null | null |
from model.utils import ConvWeight
from model.simple_cnn import SimpleCNN
from model.resnet import Resnet50
| 27
| 38
| 0.861111
| 16
| 108
| 5.75
| 0.625
| 0.293478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.111111
| 108
| 3
| 39
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
688f22d0e1b6bb772f8cf5ff99bd5fa66b91b241
| 38
|
py
|
Python
|
napari/_vispy/__init__.py
|
ctrueden/napari
|
4096e71b7e1fa041a62f4ac2f6853fba60c93e52
|
[
"BSD-3-Clause"
] | null | null | null |
napari/_vispy/__init__.py
|
ctrueden/napari
|
4096e71b7e1fa041a62f4ac2f6853fba60c93e52
|
[
"BSD-3-Clause"
] | 1
|
2019-09-18T22:59:55.000Z
|
2019-09-23T16:41:08.000Z
|
napari/_vispy/__init__.py
|
ctrueden/napari
|
4096e71b7e1fa041a62f4ac2f6853fba60c93e52
|
[
"BSD-3-Clause"
] | null | null | null |
from .util import create_vispy_visual
| 19
| 37
| 0.868421
| 6
| 38
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68a08c87197128f30e06562b35b1dd84523134f1
| 70
|
py
|
Python
|
starter_code/api_keys.py
|
BluecellChen/python-api-challenge
|
2f4f13ab30605e79f99da006ec540354e6af6690
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
BluecellChen/python-api-challenge
|
2f4f13ab30605e79f99da006ec540354e6af6690
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
BluecellChen/python-api-challenge
|
2f4f13ab30605e79f99da006ec540354e6af6690
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
# NOTE(review): hard-coded secret committed to source control — this key should be
# rotated and loaded from an environment variable or an untracked config file instead.
api_key = "0bd030fc740da7bfa74a2132d1baafb2"
| 23.333333
| 44
| 0.842857
| 6
| 70
| 9.666667
| 0.666667
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253968
| 0.1
| 70
| 2
| 45
| 35
| 0.666667
| 0.314286
| 0
| 0
| 0
| 0
| 0.695652
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7b0dc38b54c0dc4623e901d22cfa8220851fda2
| 87
|
py
|
Python
|
app/goods/__init__.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | 2
|
2018-09-07T02:39:37.000Z
|
2018-10-18T13:59:38.000Z
|
app/goods/__init__.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | null | null | null |
app/goods/__init__.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | null | null | null |
from flask import Blueprint

# Blueprint for the goods module; route handlers are attached by the bottom import.
goods = Blueprint('goods', __name__)

# Imported last (not at the top) to avoid a circular import: views needs `goods`.
from . import views
| 14.5
| 36
| 0.758621
| 11
| 87
| 5.636364
| 0.636364
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16092
| 87
| 5
| 37
| 17.4
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0.057471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d7fe8996816705351d67f426580a66f8062e2e68
| 168
|
py
|
Python
|
reforms/fields/__init__.py
|
boardpack/reforms
|
34121cf4d140ed5753e6b2f5b4a4086587d06c81
|
[
"MIT"
] | 14
|
2021-08-13T22:37:04.000Z
|
2022-03-25T15:30:13.000Z
|
reforms/fields/__init__.py
|
boardpack/reforms
|
34121cf4d140ed5753e6b2f5b4a4086587d06c81
|
[
"MIT"
] | 22
|
2021-06-22T23:41:11.000Z
|
2022-03-01T04:05:51.000Z
|
reforms/fields/__init__.py
|
boardpack/reforms
|
34121cf4d140ed5753e6b2f5b4a4086587d06c81
|
[
"MIT"
] | 2
|
2021-09-02T00:27:24.000Z
|
2021-11-20T21:43:00.000Z
|
from .base import BaseField
from .bool_field import BooleanField
from .email_field import EmailField
from .hidden import HiddenField
from .str_field import StringField
| 28
| 36
| 0.85119
| 23
| 168
| 6.086957
| 0.565217
| 0.235714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 168
| 5
| 37
| 33.6
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc1171fe9758f744aae0836bc3551c72efdaa743
| 34
|
py
|
Python
|
pydemo/__init__.py
|
ciiseven/pydemo
|
789cc4b26d05c9faf856f6f0ee3956f47c034155
|
[
"MIT"
] | null | null | null |
pydemo/__init__.py
|
ciiseven/pydemo
|
789cc4b26d05c9faf856f6f0ee3956f47c034155
|
[
"MIT"
] | null | null | null |
pydemo/__init__.py
|
ciiseven/pydemo
|
789cc4b26d05c9faf856f6f0ee3956f47c034155
|
[
"MIT"
] | null | null | null |
from .pydemo import version, demo
| 17
| 33
| 0.794118
| 5
| 34
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc274bb2b2a2bff06d504b9789bc03f433514768
| 11,339
|
py
|
Python
|
tests/test_logic.py
|
masaminh/keiba_fetcher
|
ff386ba5ee0ac15cd36f8707258051538f781d0c
|
[
"MIT"
] | null | null | null |
tests/test_logic.py
|
masaminh/keiba_fetcher
|
ff386ba5ee0ac15cd36f8707258051538f781d0c
|
[
"MIT"
] | 5
|
2021-03-31T19:29:03.000Z
|
2022-03-26T07:11:53.000Z
|
tests/test_logic.py
|
masaminh/keiba_fetcher
|
ff386ba5ee0ac15cd36f8707258051538f781d0c
|
[
"MIT"
] | null | null | null |
"""logicのテスト."""
from datetime import datetime, timezone
from unittest import mock
import src.logic as logic
def test_entry():
    """Test entry(): forwards queue, bucket and timestamp to main_loop unchanged."""
    with mock.patch('src.logic.main_loop') as n:
        logic.entry(
            'QUEUE',
            'BUCKET',
            datetime(
                2019,
                12,
                15,
                tzinfo=timezone.utc))
        n.assert_called_once_with(
            'QUEUE', 'BUCKET',
            datetime(2019, 12, 15, tzinfo=timezone.utc))
def test_fetch():
    """Test fetch(): resolves a fetcher for the URI/referer and delegates the fetch."""
    nowtime = datetime(2019, 12, 1, 12, 0, 0)
    with mock.patch('src.logic.get_fetcher') as m:
        logic.fetch(
            'https://www.yahoo.co.jp', 'http://referer', 'bucket', nowtime)
        m.assert_called_once_with('https://www.yahoo.co.jp', 'http://referer')
        m.return_value.fetch.assert_called_once_with('bucket', nowtime)
def test_get_s3_object():
    """Test get_s3_object(): returns the matching S3 object when the filter finds one."""
    with mock.patch('boto3.resource') as m:
        # Fake S3 object with the attributes the code under test reads.
        n = mock.MagicMock(
            key='key', last_modified=datetime(
                2019, 12, 1, 10, 0, 0))
        m.return_value.Bucket.return_value.objects.filter.return_value = [n]
        s3object = logic.get_s3_object('bucket', 'key')
        assert s3object.last_modified == datetime(2019, 12, 1, 10, 0, 0)
def test_get_s3_object_none():
    """Test get_s3_object(): returns None when no S3 object matches."""
    with mock.patch('boto3.resource') as m:
        m.return_value.Bucket.return_value.objects.filter.return_value = []
        s3object = logic.get_s3_object('bucket', 'key')
        assert s3object is None
def test_fetch_to_s3():
    """Test fetch_to_s3(): on HTTP 200, uploads the body to S3 and returns it."""
    uri = 'http://host/path'
    bucket = 'bucket'
    key = 'key'
    with mock.patch('requests.get') as get:
        with mock.patch('boto3.resource') as resource:
            get.return_value.status_code = 200
            get.return_value.content = b'1'
            content = logic.fetch_to_s3(uri, bucket, key)
            assert content == b'1'
            get.assert_called_once_with(uri, timeout=10)
            resource.assert_called_once_with('s3')
            resource.return_value.Bucket.assert_called_once_with(bucket)
            resource.return_value.Bucket.return_value.put_object.\
                assert_called_once_with(Key=key, Body=b'1')
def test_fetch_to_s3_error():
    """Test fetch_to_s3(): on a non-200 response, returns None and skips S3 entirely."""
    uri = 'http://host/path'
    bucket = 'bucket'
    key = 'key'
    with mock.patch('requests.get') as get:
        with mock.patch('boto3.resource') as resource:
            get.return_value.status_code = 500
            get.return_value.content = b'1'
            content = logic.fetch_to_s3(uri, bucket, key)
            assert content is None
            get.assert_called_once_with(uri, timeout=10)
            resource.assert_not_called()
def test_get_fetcher_jbis_calendar():
    """Test for get_fetcher(): JBIS calendar URI -> JbisCalendarFetcher."""
    result = logic.get_fetcher(
        'https://www.jbis.or.jp/race/calendar/', 'http://referer')
    assert isinstance(result, logic.JbisCalendarFetcher)
def test_get_fetcher_jbis_race_list():
    """Test for get_fetcher(): JBIS race-day URI -> JbisRaceListFetcher."""
    result = logic.get_fetcher(
        'https://www.jbis.or.jp/race/calendar/20200322/231/', 'http://referer')
    assert isinstance(result, logic.JbisRaceListFetcher)
def test_get_fetcher_unknown():
    """Test for get_fetcher(): any other URI falls back to DefaultFetcher."""
    assert isinstance(
        logic.get_fetcher('https://www.yahoo.co.jp', 'http://referer'),
        logic.DefaultFetcher)
def test_get_jbis_calendar_fetcher_fetch():
    """Test for JbisCalendarFetcher.fetch().

    The cached S3 copy is a full day older than *nowtime*; fetch()
    re-downloads the calendar page and returns the extracted links.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/?year=2019&month=02'
    bucket = 'bucket'
    key = 'jbis/race/calendar/2019/02'
    s3time = datetime(2019, 12, 1, 12, 0, 0)   # last_modified of cached copy
    nowtime = datetime(2019, 12, 2, 12, 0, 0)  # one day later
    # Two "list-icon-01" lists, each with one relative link.
    content = (
        b'<html><body><ul class="list-icon-01"><a href="/a" /></ul>' +
        b'<ul class="list-icon-01"><a href="/b" /></ul></body></html>')
    fetcher = logic.JbisCalendarFetcher(uri)
    with mock.patch('src.logic.get_s3_object') as m:
        with mock.patch('src.logic.fetch_to_s3') as n:
            m.return_value.last_modified = s3time
            n.return_value = content
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_called_once_with(uri, bucket, key)
            # Relative hrefs are resolved against the JBIS host.
            assert uris == [
                'https://www.jbis.or.jp/a',
                'https://www.jbis.or.jp/b']
def test_get_jbis_calendar_fetcher_fetch_newobject():
    """Test for JbisCalendarFetcher.fetch().

    The cached S3 copy is only an hour older than *nowtime*, so fetch()
    skips re-downloading and returns no URIs.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/?year=2019&month=02'
    bucket = 'bucket'
    key = 'jbis/race/calendar/2019/02'
    s3time = datetime(2019, 12, 1, 12, 0, 0)   # last_modified of cached copy
    nowtime = datetime(2019, 12, 1, 13, 0, 0)  # same day -> still fresh
    fetcher = logic.JbisCalendarFetcher(uri)
    with mock.patch('src.logic.get_s3_object') as m:
        with mock.patch('src.logic.fetch_to_s3') as n:
            m.return_value.last_modified = s3time
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_not_called()
            assert uris == []
def test_get_jbis_calendar_fetcher_fetch_noobject():
    """Test for JbisCalendarFetcher.fetch().

    No cached S3 object exists (get_s3_object returns None), so the page
    is fetched unconditionally and its links are returned.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/?year=2019&month=02'
    bucket = 'bucket'
    key = 'jbis/race/calendar/2019/02'
    nowtime = datetime(2019, 12, 1, 13, 0, 0, tzinfo=timezone.utc)
    # Two "list-icon-01" lists, each with one relative link.
    content = (
        b'<html><body><ul class="list-icon-01"><a href="/a" /></ul>' +
        b'<ul class="list-icon-01"><a href="/b" /></ul></body></html>')
    fetcher = logic.JbisCalendarFetcher(uri)
    with mock.patch('src.logic.get_s3_object', return_value=None) as m:
        with mock.patch('src.logic.fetch_to_s3') as n:
            n.return_value = content
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_called_once_with(uri, bucket, key)
            assert uris == [
                'https://www.jbis.or.jp/a',
                'https://www.jbis.or.jp/b']
def test_get_jbis_calendar_fetcher_get_s3_key():
    """Test for JbisCalendarFetcher.get_s3_key(): year/month form the key."""
    fetcher = logic.JbisCalendarFetcher(
        'https://www.jbis.or.jp/race/calendar/?year=2019&month=02')
    assert fetcher.get_s3_key() == 'jbis/race/calendar/2019/02'
def test_get_jbis_calendar_fetcher_get_next_uris():
    """Test for JbisCalendarFetcher.get_next_uris(): extracts list links."""
    markup = (
        b'<html><body><ul class="list-icon-01"><a href="/a" /></ul>' +
        b'<ul class="list-icon-01"><a href="/b" /></ul></body></html>')
    fetcher = logic.JbisCalendarFetcher(
        'https://www.jbis.or.jp/race/calendar/?year=2019&month=02')
    assert fetcher.get_next_uris(markup) == [
        'https://www.jbis.or.jp/a', 'https://www.jbis.or.jp/b']
def test_get_jbis_racelist_fetcher_fetch_result():
    """Test for JbisRaceListFetcher.fetch().

    The page date (20200317) is in the past relative to *nowtime* and the
    table has a result-style header ("距離" = distance column), so the
    cached S3 body is reused (no re-fetch) and race links are returned.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/20200317/220/'
    bucket = 'bucket'
    key = 'jbis/race/calendar/20200317/220'
    s3time = datetime(2020, 3, 18, 12, 0, 0)
    nowtime = datetime(2020, 3, 19, 12, 0, 0)
    # Result table: header row carries race name and distance columns.
    contentstr = (
        '<html>' +
        '<meta http-equiv="Content-Type" content="text/html; ' +
        'charset=Shift_JIS">' +
        '<body><table class="tbl-data-04">' +
        '<thead><tr><th>R</th><th>レース名</th><th>距離</th>' +
        '<th></th><th></th><th></th><th></th><th></th></tr></thead>' +
        '<tbody><tr><th>1</th><td><a href="/a">レース1</a></td><td>ダ1200m</td>' +
        '<td></td><td></td><td></td><td></td><td></td></tr></tbody>' +
        '</table></body></html>')
    content = contentstr.encode('shift_jis')
    fetcher = logic.JbisRaceListFetcher(uri)
    with mock.patch('src.logic.get_s3_object') as m:
        m.return_value.last_modified = s3time
        # Emulate boto3 object.get() returning a streaming body.
        body = mock.MagicMock(read=mock.MagicMock(return_value=content))
        m.return_value.get.return_value = {'Body': body}
        with mock.patch('src.logic.fetch_to_s3') as n:
            n.return_value = content
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_not_called()
            assert uris == [
                'https://www.jbis.or.jp/a']
def test_get_jbis_racelist_fetcher_fetch_entry():
    """Test for JbisRaceListFetcher.fetch().

    The page date (20200319) equals *nowtime*'s date and the table has an
    entry-style header ("発走時刻" = start time column), so the page is
    re-fetched and race links are still returned.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/20200319/220/'
    bucket = 'bucket'
    key = 'jbis/race/calendar/20200319/220'
    s3time = datetime(2020, 3, 18, 12, 0, 0)
    nowtime = datetime(2020, 3, 19, 12, 0, 0)
    # Entry table: header row carries start-time and surface columns.
    contentstr = (
        '<html>' +
        '<meta http-equiv="Content-Type" content="text/html; ' +
        'charset=Shift_JIS">' +
        '<body><table class="tbl-data-04">' +
        '<thead><tr><th>R</th><th>発走時刻</th><th>レース名</th>' +
        '<th>芝ダ</th><th></th><th></th><th></th></tr></thead>' +
        '<tbody><tr><th>1</th><td></td><td><a href="/a">レース1</a></td>' +
        '<td>ダ</td>' +
        '<td></td><td></td><td></td></tr></tbody>' +
        '</table></body></html>')
    content = contentstr.encode('shift_jis')
    fetcher = logic.JbisRaceListFetcher(uri)
    with mock.patch('src.logic.get_s3_object') as m:
        m.return_value.last_modified = s3time
        # Emulate boto3 object.get() returning a streaming body.
        body = mock.MagicMock(read=mock.MagicMock(return_value=content))
        m.return_value.get.return_value = {'Body': body}
        with mock.patch('src.logic.fetch_to_s3') as n:
            n.return_value = content
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_called_once()
            assert uris == [
                'https://www.jbis.or.jp/a']
def test_get_jbis_racelist_fetcher_fetch_stakes_entry():
    """Test for JbisRaceListFetcher.fetch().

    A future page date (20200322) whose row has no race number ("-") and
    no link: the page is re-fetched but no race URIs are produced.
    """
    uri = 'https://www.jbis.or.jp/race/calendar/20200322/231/'
    bucket = 'bucket'
    key = 'jbis/race/calendar/20200322/231'
    s3time = datetime(2020, 3, 18, 12, 0, 0)
    nowtime = datetime(2020, 3, 19, 12, 0, 0)
    # Stakes-entry table: race number column is "-" and the name has no link.
    contentstr = (
        '<html>' +
        '<meta http-equiv="Content-Type" content="text/html; ' +
        'charset=Shift_JIS">' +
        '<body><table class="tbl-data-04">' +
        '<thead><tr><th>R</th><th>レース名</th>' +
        '<th>芝ダ</th><th></th><th></th><th></th></tr></thead>' +
        '<tbody><tr><th>-</th><td>レース1</td>' +
        '<td>ダ</td>' +
        '<td></td><td></td><td></td></tr></tbody>' +
        '</table></body></html>')
    content = contentstr.encode('shift_jis')
    fetcher = logic.JbisRaceListFetcher(uri)
    with mock.patch('src.logic.get_s3_object') as m:
        m.return_value.last_modified = s3time
        # Emulate boto3 object.get() returning a streaming body.
        body = mock.MagicMock(read=mock.MagicMock(return_value=content))
        m.return_value.get.return_value = {'Body': body}
        with mock.patch('src.logic.fetch_to_s3') as n:
            n.return_value = content
            uris = fetcher.fetch(bucket, nowtime)
            m.assert_called_once_with(bucket, key)
            n.assert_called_once()
            assert uris == []
def test_default_fetcher_fetch():
    """Test for DefaultFetcher.fetch(): only emits a warning log."""
    with mock.patch('src.logic.logger') as logger:
        fetcher = logic.DefaultFetcher('abc')
        fetcher.fetch('bucket', datetime(2019, 12, 1, 12, 0, 0))
        logger.warning.assert_called_once()
| 36.814935
| 79
| 0.600053
| 1,532
| 11,339
| 4.280026
| 0.094648
| 0.057038
| 0.041635
| 0.038432
| 0.877535
| 0.844289
| 0.808296
| 0.755833
| 0.746835
| 0.699558
| 0
| 0.045486
| 0.226387
| 11,339
| 307
| 80
| 36.934853
| 0.702006
| 0.042155
| 0
| 0.625
| 0
| 0.081897
| 0.277566
| 0.130608
| 0
| 0
| 0
| 0
| 0.159483
| 1
| 0.077586
| false
| 0
| 0.012931
| 0
| 0.090517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0403ddfadb6c37d892449d0f4f5e3d1604b419f4
| 198
|
py
|
Python
|
widgets/custombutton.py
|
JOSBEAK/HangMan-Project
|
07233d4a44b3bdaedb1615f0b92d48e5fef50f5b
|
[
"MIT"
] | 16
|
2021-08-31T04:00:51.000Z
|
2022-02-11T00:35:35.000Z
|
widgets/custombutton.py
|
JOSBEAK/HangMan-Project
|
07233d4a44b3bdaedb1615f0b92d48e5fef50f5b
|
[
"MIT"
] | null | null | null |
widgets/custombutton.py
|
JOSBEAK/HangMan-Project
|
07233d4a44b3bdaedb1615f0b92d48e5fef50f5b
|
[
"MIT"
] | 1
|
2021-09-25T07:05:07.000Z
|
2021-09-25T07:05:07.000Z
|
from kivymd.uix.button import MDFillRoundFlatIconButton
from kivy.lang.builder import Builder
# NOTE: loads the kv rules at import time (module-level side effect).
Builder.load_file('widgets/custombutton.kv')
class CustomButton(MDFillRoundFlatIconButton):
    """Filled round icon button; styling presumably lives in custombutton.kv."""
    pass
| 22
| 55
| 0.833333
| 22
| 198
| 7.454545
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09596
| 198
| 8
| 56
| 24.75
| 0.916201
| 0
| 0
| 0
| 0
| 0
| 0.116162
| 0.116162
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0423d27e4dba0cfdf8f24d7759d3f79457a99731
| 182
|
py
|
Python
|
mmhuman3d/data/data_converters/builder.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 472
|
2021-12-03T03:12:55.000Z
|
2022-03-31T01:33:13.000Z
|
mmhuman3d/data/data_converters/builder.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 127
|
2021-12-03T05:00:14.000Z
|
2022-03-31T13:47:33.000Z
|
mmhuman3d/data/data_converters/builder.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 37
|
2021-12-03T03:23:22.000Z
|
2022-03-31T08:41:58.000Z
|
from mmcv.utils import Registry
# Registry that data-converter classes register themselves into.
DATA_CONVERTERS = Registry('data_converters')
def build_data_converter(cfg):
    """Build a data converter instance from *cfg* via the registry."""
    return DATA_CONVERTERS.build(cfg)
| 20.222222
| 45
| 0.758242
| 23
| 182
| 5.782609
| 0.521739
| 0.315789
| 0.330827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137363
| 182
| 8
| 46
| 22.75
| 0.847134
| 0.115385
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f0cd913c517f7c74c65a7c557230c8ef807f17c9
| 248
|
py
|
Python
|
avanzapy/warrant.py
|
alrevuelta/avanzapy
|
5a723607a3e1b5028172239372bb51ad6ac9978e
|
[
"MIT"
] | 2
|
2021-04-15T13:34:52.000Z
|
2021-08-24T17:32:26.000Z
|
avanzapy/warrant.py
|
alrevuelta/avanzapy
|
5a723607a3e1b5028172239372bb51ad6ac9978e
|
[
"MIT"
] | null | null | null |
avanzapy/warrant.py
|
alrevuelta/avanzapy
|
5a723607a3e1b5028172239372bb51ad6ac9978e
|
[
"MIT"
] | 1
|
2022-02-03T08:30:44.000Z
|
2022-02-03T08:30:44.000Z
|
from avanzapy.instrument import Instrument
from avanzapy.constants import InstrumentType
class Warrant(Instrument):
    """Warrant instrument: an Instrument tagged with InstrumentType.WARRANT.

    Args:
        raw_data: raw instrument payload, forwarded to Instrument.
        historical_data: optional historical records; defaults to an
            empty list when omitted.
    """

    def __init__(self, raw_data, historical_data=None):
        # Fix: the original used 'historical_data=[]', a mutable default
        # shared across calls. Use None as sentinel and pass a fresh list.
        super().__init__(
            InstrumentType.WARRANT, raw_data,
            [] if historical_data is None else historical_data)
| 31
| 75
| 0.790323
| 28
| 248
| 6.571429
| 0.535714
| 0.130435
| 0.184783
| 0.228261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 248
| 7
| 76
| 35.428571
| 0.847926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bc51c3299f2f01bf178836d259ed6ec983954ab
| 873
|
py
|
Python
|
venv/Lib/site-packages/tensorflow/estimator/export/__init__.py
|
caiovini/Image_reader_api
|
7fae630a17195d3415eb739278ef21a3b58cae76
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/estimator/export/__init__.py
|
caiovini/Image_reader_api
|
7fae630a17195d3415eb739278ef21a3b58cae76
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/estimator/export/__init__.py
|
caiovini/Image_reader_api
|
7fae630a17195d3415eb739278ef21a3b58cae76
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""Public API for tf.estimator.export namespace.
"""
from __future__ import print_function
from tensorflow.python.estimator.export.export import ServingInputReceiver
from tensorflow.python.estimator.export.export import TensorServingInputReceiver
from tensorflow.python.estimator.export.export import build_parsing_serving_input_receiver_fn
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from tensorflow.python.estimator.export.export_lib import ClassificationOutput
from tensorflow.python.estimator.export.export_lib import ExportOutput
from tensorflow.python.estimator.export.export_lib import PredictOutput
from tensorflow.python.estimator.export.export_lib import RegressionOutput
del print_function
| 48.5
| 93
| 0.870561
| 112
| 873
| 6.589286
| 0.383929
| 0.182927
| 0.216802
| 0.314363
| 0.598916
| 0.598916
| 0.598916
| 0.471545
| 0.170732
| 0.170732
| 0
| 0
| 0.069874
| 873
| 17
| 94
| 51.352941
| 0.908867
| 0.189003
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.9
| 0
| 0.9
| 0.2
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bd8a3c6f6f2e8d599b10b081ace166e5eacf4a3
| 33
|
py
|
Python
|
twcloud/__init__.py
|
minimaxir/twcloud
|
0251b4872fa17e342db3fdf280aa21d8909e94e4
|
[
"MIT"
] | 77
|
2019-11-27T15:54:55.000Z
|
2021-06-17T00:25:22.000Z
|
twcloud/__init__.py
|
minimaxir/twcloud
|
0251b4872fa17e342db3fdf280aa21d8909e94e4
|
[
"MIT"
] | 1
|
2021-04-30T06:46:50.000Z
|
2021-04-30T06:46:50.000Z
|
twcloud/__init__.py
|
minimaxir/twcloud
|
0251b4872fa17e342db3fdf280aa21d8909e94e4
|
[
"MIT"
] | 3
|
2019-12-20T09:37:41.000Z
|
2021-05-14T10:26:04.000Z
|
from .twcloud import gen_twcloud
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5009a69dc629fb31b5e3b8ac2dbeaeba0175c7ae
| 85
|
py
|
Python
|
dame_flame/utils/__init__.py
|
saksham-jain01/DAME-FLAME-Python-Package
|
1362baeadc05cf7ba368e40b0f2873c758c0c515
|
[
"MIT"
] | 43
|
2020-08-10T20:51:49.000Z
|
2022-03-09T08:50:37.000Z
|
dame_flame/utils/__init__.py
|
saksham-jain01/DAME-FLAME-Python-Package
|
1362baeadc05cf7ba368e40b0f2873c758c0c515
|
[
"MIT"
] | 31
|
2020-02-11T20:29:26.000Z
|
2022-02-26T10:08:17.000Z
|
dame_flame/utils/__init__.py
|
saksham-jain01/DAME-FLAME-Python-Package
|
1362baeadc05cf7ba368e40b0f2873c758c0c515
|
[
"MIT"
] | 22
|
2020-05-07T23:53:53.000Z
|
2021-08-05T14:41:59.000Z
|
"""
__init__ file within utils
"""
from . import post_processing
from . import data
| 12.142857
| 29
| 0.729412
| 11
| 85
| 5.181818
| 0.818182
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 85
| 6
| 30
| 14.166667
| 0.814286
| 0.305882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac9144137fc8d175f7109d97ce9bb79fac869f41
| 163
|
py
|
Python
|
api-v2/ContactForm/admin.py
|
MikeChurvis/mikechurvis.github.io
|
4271e5bb555f4ea1d6781f50b4344eb3bec1761c
|
[
"MIT"
] | 1
|
2022-01-26T16:58:40.000Z
|
2022-01-26T16:58:40.000Z
|
api-v2/ContactForm/admin.py
|
MikeChurvis/mikechurvis.github.io
|
4271e5bb555f4ea1d6781f50b4344eb3bec1761c
|
[
"MIT"
] | null | null | null |
api-v2/ContactForm/admin.py
|
MikeChurvis/mikechurvis.github.io
|
4271e5bb555f4ea1d6781f50b4344eb3bec1761c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import ContactFormEntry
# Register ContactFormEntry with the admin site using stock ModelAdmin
# behaviour (no list_display/search customisation yet).
@admin.register(ContactFormEntry)
class ContactFormEntryAdmin(admin.ModelAdmin):
    """Default admin configuration for ContactFormEntry."""
    pass
| 18.111111
| 46
| 0.822086
| 17
| 163
| 7.882353
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116564
| 163
| 8
| 47
| 20.375
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
acf0996ff1266d915aeabb9ef3a8ff306d1e9121
| 43,346
|
py
|
Python
|
cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py
|
ethanga12/cookbooktbd
|
bc310546f4b05d29a24eff79242c252a086d7260
|
[
"Apache-2.0"
] | 1
|
2021-01-15T18:00:01.000Z
|
2021-01-15T18:00:01.000Z
|
cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py
|
ethanga12/cookbooktbd
|
bc310546f4b05d29a24eff79242c252a086d7260
|
[
"Apache-2.0"
] | null | null | null |
cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py
|
ethanga12/cookbooktbd
|
bc310546f4b05d29a24eff79242c252a086d7260
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_admin_v1/proto/operation.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.firestore_admin_v1.proto import (
index_pb2 as google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2,
)
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/firestore_admin_v1/proto/operation.proto",
package="google.firestore.admin.v1",
syntax="proto3",
serialized_options=b"\n\035com.google.firestore.admin.v1B\016OperationProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1\352\002#Google::Cloud::Firestore::Admin::V1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/cloud/firestore_admin_v1/proto/operation.proto\x12\x19google.firestore.admin.v1\x1a\x31google/cloud/firestore_admin_v1/proto/index.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xbd\x02\n\x16IndexOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05index\x18\x03 \x01(\t\x12\x38\n\x05state\x18\x04 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress"\x88\x05\n\x16\x46ieldOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x66ield\x18\x03 \x01(\t\x12_\n\x13index_config_deltas\x18\x04 \x03(\x0b\x32\x42.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta\x12\x38\n\x05state\x18\x05 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x07 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x1a\xe7\x01\n\x10IndexConfigDelta\x12\x62\n\x0b\x63hange_type\x18\x01 \x01(\x0e\x32M.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType\x12/\n\x05index\x18\x02 \x01(\x0b\x32 .google.firestore.admin.v1.Index">\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02"\xec\x02\n\x17\x45xportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 
\x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x19\n\x11output_uri_prefix\x18\x07 \x01(\t"\xeb\x02\n\x17ImportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x18\n\x10input_uri_prefix\x18\x07 \x01(\t"4\n\x17\x45xportDocumentsResponse\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t":\n\x08Progress\x12\x16\n\x0e\x65stimated_work\x18\x01 \x01(\x03\x12\x16\n\x0e\x63ompleted_work\x18\x02 \x01(\x03*\x9e\x01\n\x0eOperationState\x12\x1f\n\x1bOPERATION_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0e\n\nPROCESSING\x10\x02\x12\x0e\n\nCANCELLING\x10\x03\x12\x0e\n\nFINALIZING\x10\x04\x12\x0e\n\nSUCCESSFUL\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\r\n\tCANCELLED\x10\x07\x42\xe2\x01\n\x1d\x63om.google.firestore.admin.v1B\x0eOperationProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1\xea\x02#Google::Cloud::Firestore::Admin::V1b\x06proto3',
dependencies=[
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_OPERATIONSTATE = _descriptor.EnumDescriptor(
name="OperationState",
full_name="google.firestore.admin.v1.OperationState",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="OPERATION_STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="INITIALIZING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="PROCESSING",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLING",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FINALIZING",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SUCCESSFUL",
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FAILED",
index=6,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLED",
index=7,
number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2017,
serialized_end=2175,
)
_sym_db.RegisterEnumDescriptor(_OPERATIONSTATE)
OperationState = enum_type_wrapper.EnumTypeWrapper(_OPERATIONSTATE)
# Module-level aliases for the OperationState enum values (generated code;
# do not edit by hand -- regenerate from the .proto instead).
OPERATION_STATE_UNSPECIFIED = 0
INITIALIZING = 1
PROCESSING = 2
CANCELLING = 3
FINALIZING = 4
SUCCESSFUL = 5
FAILED = 6
CANCELLED = 7
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ADD",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="REMOVE",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1105,
serialized_end=1167,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE)
_INDEXOPERATIONMETADATA = _descriptor.Descriptor(
name="IndexOperationMetadata",
full_name="google.firestore.admin.v1.IndexOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.IndexOperationMetadata.index",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.IndexOperationMetadata.state",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_documents",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_bytes",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=199,
serialized_end=516,
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA = _descriptor.Descriptor(
name="IndexConfigDelta",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.change_type",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.index",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=936,
serialized_end=1167,
)
_FIELDOPERATIONMETADATA = _descriptor.Descriptor(
name="FieldOperationMetadata",
full_name="google.firestore.admin.v1.FieldOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="field",
full_name="google.firestore.admin.v1.FieldOperationMetadata.field",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index_config_deltas",
full_name="google.firestore.admin.v1.FieldOperationMetadata.index_config_deltas",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.FieldOperationMetadata.state",
index=4,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_documents",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_bytes",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=519,
serialized_end=1167,
)
_EXPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ExportDocumentsMetadata",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.output_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1170,
serialized_end=1534,
)
_IMPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ImportDocumentsMetadata",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_uri_prefix",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.input_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1537,
serialized_end=1900,
)
_EXPORTDOCUMENTSRESPONSE = _descriptor.Descriptor(
name="ExportDocumentsResponse",
full_name="google.firestore.admin.v1.ExportDocumentsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1902,
serialized_end=1954,
)
_PROGRESS = _descriptor.Descriptor(
name="Progress",
full_name="google.firestore.admin.v1.Progress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="estimated_work",
full_name="google.firestore.admin.v1.Progress.estimated_work",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="completed_work",
full_name="google.firestore.admin.v1.Progress.completed_work",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1956,
serialized_end=2014,
)
_INDEXOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_INDEXOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_INDEXOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"change_type"
].enum_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"index"
].message_type = (
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2._INDEX
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.containing_type = _FIELDOPERATIONMETADATA
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE.containing_type = (
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
)
_FIELDOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"index_config_deltas"
].message_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
_FIELDOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_FIELDOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
DESCRIPTOR.message_types_by_name["IndexOperationMetadata"] = _INDEXOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["FieldOperationMetadata"] = _FIELDOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsMetadata"] = _EXPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ImportDocumentsMetadata"] = _IMPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsResponse"] = _EXPORTDOCUMENTSRESPONSE
DESCRIPTOR.message_types_by_name["Progress"] = _PROGRESS
DESCRIPTOR.enum_types_by_name["OperationState"] = _OPERATIONSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IndexOperationMetadata = _reflection.GeneratedProtocolMessageType(
"IndexOperationMetadata",
(_message.Message,),
{
"DESCRIPTOR": _INDEXOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreA
dmin.CreateIndex].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
index:
The index resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/indexes/{index_id}``
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.IndexOperationMetadata)
},
)
_sym_db.RegisterMessage(IndexOperationMetadata)
FieldOperationMetadata = _reflection.GeneratedProtocolMessageType(
"FieldOperationMetadata",
(_message.Message,),
{
"IndexConfigDelta": _reflection.GeneratedProtocolMessageType(
"IndexConfigDelta",
(_message.Message,),
{
"DESCRIPTOR": _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Information about an index configuration change.
Attributes:
change_type:
Specifies how the index is changing.
index:
The index being changed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta)
},
),
"DESCRIPTOR": _FIELDOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreA
dmin.UpdateField].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
field:
The field resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/fields/{field_path}``
index_config_deltas:
A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOp
erationMetadata.IndexConfigDelta], which describe the intent
of this operation.
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata)
},
)
_sym_db.RegisterMessage(FieldOperationMetadata)
_sym_db.RegisterMessage(FieldOperationMetadata.IndexConfigDelta)
ExportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ExportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the export operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being exported.
output_uri_prefix:
Where the entities are being exported to.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ExportDocumentsMetadata)
ImportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ImportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _IMPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ImportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the import operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being imported.
input_uri_prefix:
The location of the documents being imported.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ImportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ImportDocumentsMetadata)
ExportDocumentsResponse = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsResponse",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSRESPONSE,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Returned in the
[google.longrunning.Operation][google.longrunning.Operation] response
field.
Attributes:
output_uri_prefix:
Location of the output files. This can be used to begin an
import into Cloud Firestore (this project or another project)
after the operation completes successfully.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsResponse)
},
)
_sym_db.RegisterMessage(ExportDocumentsResponse)
Progress = _reflection.GeneratedProtocolMessageType(
"Progress",
(_message.Message,),
{
"DESCRIPTOR": _PROGRESS,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Describes the progress of the operation. Unit of work is generic and
must be interpreted based on where
[Progress][google.firestore.admin.v1.Progress] is used.
Attributes:
estimated_work:
The amount of work estimated.
completed_work:
The amount of work completed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Progress)
},
)
_sym_db.RegisterMessage(Progress)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.548061
| 3,531
| 0.643289
| 4,402
| 43,346
| 6.019309
| 0.077465
| 0.035023
| 0.056157
| 0.05812
| 0.78239
| 0.74933
| 0.726648
| 0.68136
| 0.631241
| 0.619315
| 0
| 0.034161
| 0.265907
| 43,346
| 1,185
| 3,532
| 36.578903
| 0.798554
| 0.019863
| 0
| 0.773169
| 1
| 0.001765
| 0.289889
| 0.187082
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030891
| 0
| 0.030891
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a055acaf4366c021268f052cf314f579e9bd0b2
| 53
|
py
|
Python
|
utils/__init__.py
|
yz-cnsdqz/MOJO-release
|
476b40c8111861c6ab6b193a68e634d9aeb4e407
|
[
"MIT"
] | 58
|
2021-06-18T17:00:06.000Z
|
2022-03-20T12:21:12.000Z
|
utils/__init__.py
|
wei-mao-2019/gsps
|
7f8de905f49bc739747174ade343a431ec8fe74e
|
[
"MIT"
] | 5
|
2021-09-10T07:04:38.000Z
|
2022-01-18T17:35:00.000Z
|
utils/__init__.py
|
wei-mao-2019/gsps
|
7f8de905f49bc739747174ade343a431ec8fe74e
|
[
"MIT"
] | 3
|
2021-06-24T04:04:07.000Z
|
2021-06-30T14:22:54.000Z
|
from utils.torch import *
from utils.logger import *
| 17.666667
| 26
| 0.773585
| 8
| 53
| 5.125
| 0.625
| 0.439024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 53
| 2
| 27
| 26.5
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c58db1de5dd3e74aa98dd90c1772bbb4f25314a7
| 29,308
|
py
|
Python
|
tests/e2e/test_api.py
|
linearcombination/DOC
|
4478e55ec81426c15a2c402cb838e76d79741c03
|
[
"MIT"
] | null | null | null |
tests/e2e/test_api.py
|
linearcombination/DOC
|
4478e55ec81426c15a2c402cb838e76d79741c03
|
[
"MIT"
] | 1
|
2022-03-28T17:44:24.000Z
|
2022-03-28T17:44:24.000Z
|
tests/e2e/test_api.py
|
linearcombination/DOC
|
4478e55ec81426c15a2c402cb838e76d79741c03
|
[
"MIT"
] | 3
|
2022-01-14T02:55:44.000Z
|
2022-02-23T00:17:51.000Z
|
"""This module provides tests for the application's FastAPI API."""
import os
import pathlib
import bs4
import pytest
import requests
from fastapi.testclient import TestClient
from document.config import settings
from document.entrypoints.app import app
def check_finished_document_with_verses_success(
response: requests.Response, finished_document_path: str
) -> None:
"""
Helper to keep tests DRY.
Check that the finished_document_path exists and also check that
the HTML file associated with it exists and includes verses_html.
"""
finished_document_path = os.path.join(settings.output_dir(), finished_document_path)
assert os.path.isfile(finished_document_path)
html_file = "{}.html".format(finished_document_path.split(".")[0])
assert os.path.isfile(html_file)
assert response.json() == {
"finished_document_request_key": pathlib.Path(finished_document_path).stem,
"message": settings.SUCCESS_MESSAGE,
}
with open(html_file, "r") as fin:
html = fin.read()
parser = bs4.BeautifulSoup(html, "html.parser")
body: bs4.elements.ResultSet = parser.find_all("body")
assert body
verses_html: bs4.elements.ResultSet = parser.find_all(
"span", attrs={"class": "v-num"}
)
assert verses_html
assert response.ok
##########################################################################
## Specific targeted tests (wrt language, resource type, resource code) ##
##########################################################################
def test_en_ulb_wa_col_en_tn_wa_col_language_book_order_with_no_email() -> None:
"""
Produce verse interleaved document for English scripture and
translation notes for the book of Colossians.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
# "email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "col",
},
],
},
)
finished_document_path = "en-ulb-wa-col_en-tn-wa-col_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_language_book_order() -> None:
"""
Produce verse level interleaved document for English scripture,
translation notes, and translation questions for the book of Col.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "col",
},
],
},
)
finished_document_path = (
"en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_language_book_order.pdf"
)
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_tn_wa_jud_language_book_order() -> None:
"""
Produce verse level interleaved document for English scripture and
translation notes for the book of Jude.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "jud",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "jud",
},
],
},
)
finished_document_path = "en-ulb-wa-jud_en-tn-wa-jud_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_ar_nav_jud_language_book_order() -> None:
"""
Produce verse level interleaved document for language, ar, Arabic
scripture. There are no other resources than USFM available at
this time.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "ar",
"resource_type": "nav",
"resource_code": "jud",
},
],
},
)
finished_document_path = "ar-nav-jud_language_book_order.pdf"
with pytest.raises(Exception):
check_finished_document_with_verses_success(
response, finished_document_path
)
def test_pt_br_ulb_tn_language_book_order() -> None:
"""
Produce verse level interleaved document for Brazilian Portuguese scripture and
translation notes for the book of Genesis.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "pt-br",
"resource_type": "ulb",
"resource_code": "gen",
},
{
"lang_code": "pt-br",
"resource_type": "tn",
"resource_code": "gen",
},
],
},
)
finished_document_path = "pt-br-ulb-gen_pt-br-tn-gen_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_pt_br_ulb_tn_en_ulb_wa_tn_wa_luk_language_book_order() -> None:
"""
Produce verse level interleaved document for Brazilian Portuguese
and English scripture and translation notes for the book of Luke.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "pt-br",
"resource_type": "ulb",
"resource_code": "luk",
},
{
"lang_code": "pt-br",
"resource_type": "tn",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "luk",
},
],
},
)
finished_document_path = "pt-br-ulb-luk_pt-br-tn-luk_en-ulb-wa-luk_en-tn-wa-luk_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_pt_br_ulb_tn_luk_en_ulb_wa_tn_wa_luk_sw_ulb_tn_col_language_book_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "pt-br",
"resource_type": "ulb",
"resource_code": "luk",
},
{
"lang_code": "pt-br",
"resource_type": "tn",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "luk",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tn",
"resource_code": "col",
},
],
},
)
finished_document_path = "pt-br-ulb-luk_pt-br-tn-luk_en-ulb-wa-luk_en-tn-wa-luk_sw-ulb-col_sw-tn-col_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_sw_ulb_col_sw_tn_col_sw_tq_col_sw_tw_col_sw_ulb_tit_sw_tn_tit_sw_tq_tit_sw_tw_tit_language_book_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tn",
"resource_code": "tit",
},
{
"lang_code": "sw",
"resource_type": "tq",
"resource_code": "tit",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "tit",
},
],
},
)
finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_sw-ulb-col_sw-tn-col_sw-tq-col_sw-tw-col_sw-tn-tit_sw-tq-tit_sw-tw-tit_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tw_wa_col_sw_ulb_col_sw_tn_col_sw_tw_col_sw_ulb_tit_sw_tn_tit_sw_tw_tit_language_book_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "tit",
},
{
"lang_code": "sw",
"resource_type": "tn",
"resource_code": "tit",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "tit",
},
],
},
)
finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tw-wa-col_sw-ulb-col_sw-tn-col_sw-tw-col_sw-ulb-tit_sw-tn-tit_sw-tw-tit_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tw_wa_col_sw_ulb_col_sw_tw_col_sw_ulb_tit_sw_tw_tit_language_book_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "ulb",
"resource_code": "tit",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "tit",
},
],
},
)
finished_document_path = "en-ulb-wa-col_en-tw-wa-col_sw-ulb-col_sw-tw-col_sw-ulb-tit_sw-tw-tit_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tq_wa_col_en_tw_wa_col_sw_ulb_col_sw_tq_col_sw_tw_col_sw_ulb_tit_sw_tq_tit_sw_tw_tit_language_book_order() -> None:
    """
    Request a language-book-order document combining English ULB, TQ and
    TW (wa variants) for Colossians with Swahili ULB, TQ and TW for both
    Colossians and Titus, then verify the finished PDF contains verses.
    """
    # (lang_code, resource_type, resource_code) triples in request order.
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "tq", "col"),
        ("sw", "tw", "col"),
        ("sw", "ulb", "tit"),
        ("sw", "tq", "tit"),
        ("sw", "tw", "tit"),
    ]
    payload = {
        "email_address": settings.TO_EMAIL_ADDRESS,
        "assembly_strategy_kind": "language_book_order",
        "resource_requests": [
            {
                "lang_code": lang_code,
                "resource_type": resource_type,
                "resource_code": resource_code,
            }
            for lang_code, resource_type, resource_code in requested_resources
        ],
    }
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post("/documents", json=payload)
        finished_document_path = "en-ulb-wa-col_en-tq-wa-col_en-tw-wa-col_sw-ulb-col_sw-tq-col_sw-tw-col_sw-ulb-tit_sw-tq-tit_sw-tw-tit_language_book_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tq_wa_col_en_tw_wa_col_sw_ulb_col_sw_tq_col_sw_tw_col_zh_cuv_tit_sw_tq_tit_sw_tw_tit_language_book_order() -> None:
    """
    This test demonstrates the quirk of combining resources for
    the same books but from different languages: Titus scripture comes
    from Chinese (cuv) while its helps (tq, tw) come from Swahili.
    """
    # (lang_code, resource_type, resource_code) triples in request order.
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "tq", "col"),
        ("sw", "tw", "col"),
        ("zh", "cuv", "tit"),
        ("sw", "tq", "tit"),
        ("sw", "tw", "tit"),
    ]
    payload = {
        "email_address": settings.TO_EMAIL_ADDRESS,
        "assembly_strategy_kind": "language_book_order",
        "resource_requests": [
            {
                "lang_code": lang_code,
                "resource_type": resource_type,
                "resource_code": resource_code,
            }
            for lang_code, resource_type, resource_code in requested_resources
        ],
    }
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post("/documents", json=payload)
        finished_document_path = "en-ulb-wa-col_en-tq-wa-col_en-tw-wa-col_sw-ulb-col_sw-tq-col_sw-tw-col_zh-cuv-tit_sw-tq-tit_sw-tw-tit_language_book_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
###################################################################
# Tests that were originally chosen at random and failed when run
# by our random combinatoric test generator.
###################################################################
def test_zh_ulb_doesnt_exist_jol_zh_tn_jol_language_book_order() -> None:
    """
    This shows that a resource request for resource type ULB fails for
    lang_code zh because such a resource type does not exist for zh
    (cuv should have been requested instead). The other resources are
    found and thus a PDF document is still created, but it lacks the
    scripture verses.
    """
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        payload = {
            "email_address": settings.TO_EMAIL_ADDRESS,
            "assembly_strategy_kind": "language_book_order",
            "resource_requests": [
                {
                    "lang_code": "zh",
                    "resource_type": "ulb",
                    "resource_code": "jol",
                },
                {
                    "lang_code": "zh",
                    "resource_type": "tn",
                    "resource_code": "jol",
                },
            ],
        }
        response: requests.Response = client.post("/documents", json=payload)
        pdf_path = os.path.join(
            settings.output_dir(), "zh-ulb-jol_zh-tn-jol_language_book_order.pdf"
        )
        # The HTML intermediate shares the PDF's path up to the first dot.
        html_path = "{}.html".format(pdf_path.split(".")[0])
        assert os.path.exists(pdf_path)
        assert os.path.exists(html_path)
        # The zh ulb request fails because zh has no ulb resource type, so
        # that request becomes an ignored resource; the remaining resources
        # are found and the overall document request still succeeds.
        assert response.ok
        with open(html_path, "r") as fin:
            document_html = fin.read()
        soup = bs4.BeautifulSoup(document_html, "html.parser")
        # The document body was generated.
        assert soup.find_all("body")
        # Since ulb doesn't exist as a resource_type for zh, there are
        # no verse spans available in the document.
        verse_spans: bs4.elements.ResultSet = soup.find_all(
            "span", attrs={"class": "v-num"}
        )
        assert not verse_spans
def test_zh_cuv_jol_zh_tn_jol_language_book_order() -> None:
    """
    This test succeeds by correcting the mistake of the document request
    in the test above it, i.e., requesting cuv instead of ulb for zh.
    """
    # (lang_code, resource_type, resource_code) triples in request order.
    requested_resources = [
        ("zh", "cuv", "jol"),
        ("zh", "tn", "jol"),
    ]
    payload = {
        "email_address": settings.TO_EMAIL_ADDRESS,
        "assembly_strategy_kind": "language_book_order",
        "resource_requests": [
            {
                "lang_code": lang_code,
                "resource_type": resource_type,
                "resource_code": resource_code,
            }
            for lang_code, resource_type, resource_code in requested_resources
        ],
    }
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post("/documents", json=payload)
        finished_document_path = "zh-cuv-jol_zh-tn-jol_language_book_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_zh_cuv_jol_zh_tn_jol_zh_tq_jol_zh_tw_jol_language_book_order() -> None:
    """
    Request the correct zh scripture resource type, cuv, for the book of
    Joel along with all three zh helps resources (tn, tq, tw), and verify
    the finished PDF contains scripture verses.

    Note: the docstring previously claimed this test "corrects the
    mistake of the test above it", but that correction is already made
    by test_zh_cuv_jol_zh_tn_jol_language_book_order; this test extends
    it with tq and tw.
    """
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "language_book_order",
                "resource_requests": [
                    {
                        "lang_code": "zh",
                        "resource_type": "cuv",
                        "resource_code": "jol",
                    },
                    {
                        "lang_code": "zh",
                        "resource_type": "tn",
                        "resource_code": "jol",
                    },
                    {
                        "lang_code": "zh",
                        "resource_type": "tq",
                        "resource_code": "jol",
                    },
                    {
                        "lang_code": "zh",
                        "resource_type": "tw",
                        "resource_code": "jol",
                    },
                ],
            },
        )
        finished_document_path = (
            "zh-cuv-jol_zh-tn-jol_zh-tq-jol_zh-tw-jol_language_book_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_pt_br_ulb_luk_pt_br_tn_luk_language_book_order() -> None:
    """
    Produce verse level interleaved document for Brazilian Portuguese
    scripture and translation notes for the book of Luke.

    (The docstring previously said Genesis, but the requested
    resource_code is luk, i.e., Luke.)
    """
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "language_book_order",
                "resource_requests": [
                    {
                        "lang_code": "pt-br",
                        "resource_type": "ulb",
                        "resource_code": "luk",
                    },
                    {
                        "lang_code": "pt-br",
                        "resource_type": "tn",
                        "resource_code": "luk",
                    },
                ],
            },
        )
        finished_document_path = "pt-br-ulb-luk_pt-br-tn-luk_language_book_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
| 39.445491
| 182
| 0.454279
| 2,682
| 29,308
| 4.59657
| 0.077927
| 0.076898
| 0.066191
| 0.052401
| 0.871269
| 0.869403
| 0.842959
| 0.839228
| 0.830062
| 0.816272
| 0
| 0.000544
| 0.435069
| 29,308
| 742
| 183
| 39.498652
| 0.744036
| 0.080968
| 0
| 0.606688
| 0
| 0.012739
| 0.22253
| 0.061365
| 0
| 0
| 0
| 0
| 0.017516
| 1
| 0.02707
| false
| 0
| 0.012739
| 0
| 0.039809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5f366c4d5915e47fa5fd120e4814e1570d052cc
| 88
|
py
|
Python
|
conftest.py
|
marcinliebiediew/order_book
|
3587f1790cfa10ac77dffa1833e99a75991b8d11
|
[
"MIT"
] | 1
|
2022-03-12T10:51:07.000Z
|
2022-03-12T10:51:07.000Z
|
conftest.py
|
marcinliebiediew/order_book
|
3587f1790cfa10ac77dffa1833e99a75991b8d11
|
[
"MIT"
] | null | null | null |
conftest.py
|
marcinliebiediew/order_book
|
3587f1790cfa10ac77dffa1833e99a75991b8d11
|
[
"MIT"
] | null | null | null |
import pytest
from src import app as _app
@pytest.fixture
def app():
return _app
| 9.777778
| 27
| 0.715909
| 14
| 88
| 4.357143
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 88
| 8
| 28
| 11
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c5f6f7689a4d4e984e34a0d7c4a6e50fbbd43cd1
| 47
|
py
|
Python
|
theanompi/__init__.py
|
uoguelph-mlrg/Theano-MPI
|
4bf0ebc167967dc3cb0969d4b12e304ef11d724a
|
[
"ECL-2.0"
] | 65
|
2016-05-27T02:29:42.000Z
|
2022-03-29T20:17:29.000Z
|
theanompi/__init__.py
|
afcarl/Theano-MPI
|
4bf0ebc167967dc3cb0969d4b12e304ef11d724a
|
[
"ECL-2.0"
] | 19
|
2016-05-27T21:18:55.000Z
|
2019-03-23T07:02:44.000Z
|
theanompi/__init__.py
|
afcarl/Theano-MPI
|
4bf0ebc167967dc3cb0969d4b12e304ef11d724a
|
[
"ECL-2.0"
] | 30
|
2016-05-27T02:29:44.000Z
|
2019-05-17T04:46:17.000Z
|
from theanompi.rules import BSP, EASGD, GOSGD
| 15.666667
| 45
| 0.787234
| 7
| 47
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 46
| 23.5
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a84ffefe6cbd2b56c83b2a919f479d116ae7d176
| 65
|
py
|
Python
|
ml3d/datasets/augment/__init__.py
|
kylevedder/Open3D-ML
|
87ec50ed81d531377b1bb27e5c16f964201eadb0
|
[
"MIT"
] | 447
|
2020-10-14T23:16:41.000Z
|
2021-07-27T06:57:45.000Z
|
ml3d/datasets/augment/__init__.py
|
kylevedder/Open3D-ML
|
87ec50ed81d531377b1bb27e5c16f964201eadb0
|
[
"MIT"
] | 118
|
2020-10-14T10:20:37.000Z
|
2021-07-27T12:23:18.000Z
|
ml3d/datasets/augment/__init__.py
|
kylevedder/Open3D-ML
|
87ec50ed81d531377b1bb27e5c16f964201eadb0
|
[
"MIT"
] | 80
|
2020-10-14T17:35:48.000Z
|
2021-07-23T08:48:17.000Z
|
from .augmentation import SemsegAugmentation, ObjdetAugmentation
| 32.5
| 64
| 0.892308
| 5
| 65
| 11.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 65
| 1
| 65
| 65
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a853842bb9e8acd8990827cefe74e98a64b6060c
| 3,707
|
py
|
Python
|
score_zeroshot_gpt2.py
|
peterwestuw/FactualAblation
|
258228b8dbd4ced635a841417cfaa2d7edc9af8e
|
[
"MIT"
] | 2
|
2022-03-24T03:21:12.000Z
|
2022-03-24T03:51:55.000Z
|
score_zeroshot_gpt2.py
|
peterwestuw/FactualAblation
|
258228b8dbd4ced635a841417cfaa2d7edc9af8e
|
[
"MIT"
] | null | null | null |
score_zeroshot_gpt2.py
|
peterwestuw/FactualAblation
|
258228b8dbd4ced635a841417cfaa2d7edc9af8e
|
[
"MIT"
] | null | null | null |
from FA_data_utils import get_FA_synthetic_dataset, get_FA_wiki_dataset
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from tqdm import tqdm
from zeroshot_example_utils import get_nll_list, tokenize_for_gpt2_zs, get_truncated_inputs_gpt2_zs
import math
###
# Load Factual Ablation datasets
###
FA_wiki_dataset = get_FA_wiki_dataset()
FA_synth_dataset = get_FA_synthetic_dataset()
###
# Load zero-shot gpt2 and tokenizer
###
gpt2_tokenizer_zs = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2').eval().cuda()
####
# Evaluate on the synthetic dataset (identical to above besides dataset and margin)
####
print('='*20)
print('Evaluating on synthetic dataset...')
print('='*20)
model_scores = []
## get scores for true and false grounding over the full dataset
for ex in tqdm(FA_synth_dataset):
## get the components of the example; each is a string
grounding_True, grounding_False, context, target_str = ex
## First, get the target score given the True grounding
# truncate inputs to fit in the model window
gnd, ctxt = get_truncated_inputs_gpt2_zs(grounding_True, context, target_str, gpt2_tokenizer_zs, trim_order = 'shortest')
# tokenize inputs
inp, target = tokenize_for_gpt2_zs(gnd,ctxt,target_str)
# get the nll of the target under true grounding
score_True = get_nll_list(model,[inp],[target])[0]
## Next, get the target score given the False grounding (same as above)
gnd, ctxt = get_truncated_inputs_gpt2_zs(grounding_False, context, target_str, gpt2_tokenizer_zs, trim_order = 'shortest')
inp, target = tokenize_for_gpt2_zs(gnd,ctxt,target_str)
score_False = get_nll_list(model,[inp],[target])[0]
model_scores.append((score_True,score_False))
## get accuracy (margin=0) and margin-accuracy
print('='*20)
margin= 0
print('margin_acc (m = {}): {}'.format(margin, sum([(v[1] - v[0] > margin) for v in model_scores] ) / len(model_scores)))
margin= math.log(100)
print('margin_acc (m = {}): {}'.format(margin, sum([(v[1] - v[0] > margin) for v in model_scores] ) / len(model_scores)))
print('='*20)
####
# Evaluate on the wiki dataset (identical to above besides dataset and margin)
####
print('='*20)
print('Evaluating on natural (wiki) dataset...')
print('='*20)
model_scores = []
## get scores for true and false grounding over the full dataset
for ex in tqdm(FA_wiki_dataset):
## get the components of the example; each is a string
grounding_True, grounding_False, context, target_str = ex
## First, get the target score given the True grounding
# truncate inputs to fit in the model window
gnd, ctxt = get_truncated_inputs_gpt2_zs(grounding_True, context, target_str, gpt2_tokenizer_zs, trim_order = 'shortest')
# tokenize inputs
inp, target = tokenize_for_gpt2_zs(gnd,ctxt,target_str)
# get the nll of the target under true grounding
score_True = get_nll_list(model,[inp],[target])[0]
## Next, get the target score given the False grounding (same as above)
gnd, ctxt = get_truncated_inputs_gpt2_zs(grounding_False, context, target_str, gpt2_tokenizer_zs, trim_order = 'shortest')
inp, target = tokenize_for_gpt2_zs(gnd,ctxt,target_str)
score_False = get_nll_list(model,[inp],[target])[0]
model_scores.append((score_True,score_False))
## get accuracy (margin=0) and margin-accuracy
print('='*20)
margin= 0
print('margin_acc (m = {}): {}'.format(margin, sum([(v[1] - v[0] > margin) for v in model_scores] ) / len(model_scores)))
margin= math.log(1000)
print('margin_acc (m = {}): {}'.format(margin, sum([(v[1] - v[0] > margin) for v in model_scores] ) / len(model_scores)))
print('='*20)
| 37.826531
| 126
| 0.720798
| 554
| 3,707
| 4.593863
| 0.17148
| 0.051866
| 0.037721
| 0.033399
| 0.829862
| 0.802358
| 0.802358
| 0.802358
| 0.802358
| 0.802358
| 0
| 0.019533
| 0.15754
| 3,707
| 98
| 127
| 37.826531
| 0.795389
| 0.268411
| 0
| 0.680851
| 0
| 0
| 0.080256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.106383
| 0
| 0.106383
| 0.297872
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a865fb258c4549d3631e243609c07ce295534203
| 65
|
py
|
Python
|
projectile/tools/__init__.py
|
Vayel/projectile
|
f9a7cba9cc1f07f1e6ea8aad9e7567e0a3ba03e7
|
[
"MIT"
] | null | null | null |
projectile/tools/__init__.py
|
Vayel/projectile
|
f9a7cba9cc1f07f1e6ea8aad9e7567e0a3ba03e7
|
[
"MIT"
] | 9
|
2016-12-28T20:36:57.000Z
|
2017-01-04T15:29:41.000Z
|
projectile/tools/__init__.py
|
Vayel/projectile
|
f9a7cba9cc1f07f1e6ea8aad9e7567e0a3ba03e7
|
[
"MIT"
] | null | null | null |
from .google import *
from .drive import *
from .trello import *
| 16.25
| 21
| 0.723077
| 9
| 65
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 3
| 22
| 21.666667
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a868a8da332ca1e83077c2e7bd45f7474abc3f71
| 72
|
py
|
Python
|
collections/nemo_nlp/nemo_nlp/utils/__init__.py
|
luungoc2005/NeMo
|
5e1d9cba4d245135023396479a52a951a911b2a8
|
[
"Apache-2.0"
] | null | null | null |
collections/nemo_nlp/nemo_nlp/utils/__init__.py
|
luungoc2005/NeMo
|
5e1d9cba4d245135023396479a52a951a911b2a8
|
[
"Apache-2.0"
] | null | null | null |
collections/nemo_nlp/nemo_nlp/utils/__init__.py
|
luungoc2005/NeMo
|
5e1d9cba4d245135023396479a52a951a911b2a8
|
[
"Apache-2.0"
] | null | null | null |
from .callbacks import *
from .metrics import *
from .nlp_utils import *
| 24
| 24
| 0.763889
| 10
| 72
| 5.4
| 0.6
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 72
| 3
| 25
| 24
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a89d05b17cf217e39a1011aa38136c310367f1b7
| 213
|
py
|
Python
|
test/test_main.py
|
michaelbarton/nucleotides-cli
|
04c94773a9186dc67a887e91e3cdc9ba4a41d3fc
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
test/test_main.py
|
michaelbarton/nucleotides-cli
|
04c94773a9186dc67a887e91e3cdc9ba4a41d3fc
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
test/test_main.py
|
michaelbarton/nucleotides-cli
|
04c94773a9186dc67a887e91e3cdc9ba4a41d3fc
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import nose.tools as nose
import nucleotides.main as main
import nucleotides.command.fetch_data
def test_command():
nose.assert_equal(main.select_command('fetch-data'), nucleotides.command.fetch_data)
| 26.625
| 88
| 0.788732
| 30
| 213
| 5.433333
| 0.466667
| 0.220859
| 0.294479
| 0.331288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122066
| 213
| 7
| 89
| 30.428571
| 0.871658
| 0
| 0
| 0
| 0
| 0
| 0.046948
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7637e03234348522fcd3d788416863b5bb628129
| 19,601
|
py
|
Python
|
psono/restapi/tests/mfa_google_authenticator.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 48
|
2018-04-19T15:50:58.000Z
|
2022-01-23T15:58:11.000Z
|
psono/restapi/tests/mfa_google_authenticator.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 9
|
2018-09-13T14:56:18.000Z
|
2020-01-17T16:44:33.000Z
|
psono/restapi/tests/mfa_google_authenticator.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-09-20T11:53:47.000Z
|
2021-07-18T22:41:31.000Z
|
from django.urls import reverse
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.hashers import make_password
from rest_framework import status
from datetime import timedelta
import random
import string
import binascii
import os
import hashlib
import pyotp
import bcrypt
import json
import nacl.encoding
import nacl.utils
import nacl.secret
from restapi import models
from .base import APITestCaseExtended
from ..utils import encrypt_with_db_secret
class GoogleAuthenticatorVerifyTests(APITestCaseExtended):
def setUp(self):
self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
self.test_email_bcrypt = 'a'
self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_user_sauce = '33afce78b0152075457e2a4d58b80312162f08ee932551c833b3d08d58574f03'
self.test_user_obj = models.User.objects.create(
email=self.test_email,
email_bcrypt=self.test_email_bcrypt,
username=self.test_username,
authkey=make_password(self.test_authkey),
public_key=self.test_public_key,
private_key=self.test_private_key,
private_key_nonce=self.test_private_key_nonce,
secret_key=self.test_secret_key,
secret_key_nonce=self.test_secret_key_nonce,
user_sauce=self.test_user_sauce,
is_email_active=True
)
self.token = ''.join(random.choice(string.ascii_lowercase) for _ in range(64))
self.session_secret_key = hashlib.sha256(settings.DB_SECRET.encode()).hexdigest()
models.Token.objects.create(
key= hashlib.sha512(self.token.encode()).hexdigest(),
user=self.test_user_obj,
secret_key=self.session_secret_key,
valid_till = timezone.now() + timedelta(seconds=10)
)
secret = pyotp.random_base32()
self.totp = pyotp.TOTP(secret)
models.Google_Authenticator.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
secret = encrypt_with_db_secret(str(secret))
)
# encrypt authorization validator with session key
secret_box = nacl.secret.SecretBox(self.session_secret_key, encoder=nacl.encoding.HexEncoder)
authorization_validator_nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
authorization_validator_nonce_hex = nacl.encoding.HexEncoder.encode(authorization_validator_nonce)
encrypted = secret_box.encrypt(json.dumps({}).encode("utf-8"), authorization_validator_nonce)
authorization_validator = encrypted[len(authorization_validator_nonce):]
authorization_validator_hex = nacl.encoding.HexEncoder.encode(authorization_validator)
self.authorization_validator = json.dumps({
'text': authorization_validator_hex.decode(),
'nonce': authorization_validator_nonce_hex.decode(),
})
def test_get_authentication_ga_verify(self):
"""
Tests GET method on authentication_ga_verify
"""
url = reverse('authentication_ga_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.get(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_authentication_ga_verify(self):
"""
Tests PUT method on authentication_ga_verify
"""
url = reverse('authentication_ga_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_post_authentication_ga_verify(self):
"""
Tests POST method on authentication_ga_verify
"""
url = reverse('authentication_ga_verify')
data = {
'token': self.token,
'ga_token': self.totp.now()
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_authentication_ga_verify_invalid_token(self):
"""
Tests POST method on authentication_ga_verify with invalid token
"""
url = reverse('authentication_ga_verify')
data = {
'token': '12345',
'ga_token': self.totp.now()
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + '12345', HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_authentication_ga_verify_no_proper_formatted_ga_token(self):
"""
Tests POST method on authentication_ga_verify with no proper formatted ga_token
"""
url = reverse('authentication_ga_verify')
data = {
'token': self.token,
'ga_token': 'ABCDEF'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('non_field_errors'), [u'GA Tokens only contain digits.'])
def test_post_authentication_ga_verify_invalid_ga_token(self):
"""
Tests POST method on authentication_ga_verify with an invalid ga_token
"""
url = reverse('authentication_ga_verify')
data = {
'token': self.token,
'ga_token': '012345'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotEqual(response.data.get('non_field_errors', False), False)
def test_delete_authentication_ga_verify(self):
"""
Tests DELETE method on authentication_ga_verify
"""
url = reverse('authentication_ga_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class GoogleAuthenticatorTests(APITestCaseExtended):
def setUp(self):
self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
self.test_email_bcrypt = 'a'
self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_user_sauce = '6df1f310730e5464ce23e05fa4eca0de3fe30805fc8cc1d6b37389262e4bd9c3'
self.test_user_obj = models.User.objects.create(
email=self.test_email,
email_bcrypt=self.test_email_bcrypt,
username=self.test_username,
authkey=make_password(self.test_authkey),
public_key=self.test_public_key,
private_key=self.test_private_key,
private_key_nonce=self.test_private_key_nonce,
secret_key=self.test_secret_key,
secret_key_nonce=self.test_secret_key_nonce,
user_sauce=self.test_user_sauce,
is_email_active=True
)
self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
self.test_email_bcrypt2 = bcrypt.hashpw(self.test_email2.encode(), settings.EMAIL_SECRET_SALT.encode()).decode().replace(settings.EMAIL_SECRET_SALT, '', 1)
self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
self.test_user_obj2 = models.User.objects.create(
username=self.test_username2,
email=encrypt_with_db_secret(self.test_email2),
email_bcrypt=self.test_email_bcrypt2,
authkey=make_password(self.test_authkey2),
public_key=self.test_public_key2,
private_key=self.test_private_key2,
private_key_nonce=self.test_private_key_nonce2,
secret_key=self.test_secret_key2,
secret_key_nonce=self.test_secret_key_nonce2,
user_sauce=self.test_user_sauce2,
is_email_active=True
)
def test_get_user_ga(self):
"""
Tests GET method on user_ga
"""
ga = models.Google_Authenticator.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
secret = '1234'
)
url = reverse('user_ga')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.get(url, data)
self.assertEqual(response.data, {
"google_authenticators":[{
"id":ga.id,
"active":ga.active,
"title":"My Sweet Title"
}]
})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_put_user_ga(self):
"""
Tests PUT method on user_ga
"""
url = reverse('user_ga')
data = {
'title': 'asdu5zz53',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNotEqual(response.data.get('id', False), False)
self.assertNotEqual(response.data.get('secret', False), False)
def test_put_user_ga_no_title(self):
"""
Tests PUT method on user_ga with no title
"""
url = reverse('user_ga')
data = {
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_user_ga_no_parameters(self):
"""
Tests POST method on user_ga
"""
url = reverse('user_ga')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_activate_ga_success(self):
"""
Tests POST method on user_ga to activate a Google Authenticator
"""
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
ga = models.Google_Authenticator.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
secret = encrypt_with_db_secret(str(secret)),
active= False
)
url = reverse('user_ga')
data = {
'google_authenticator_id': ga.id,
'google_authenticator_token': totp.now(),
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
db_ga = models.Google_Authenticator.objects.get(pk=ga.id)
self.assertTrue(db_ga.active)
def test_activate_ga_failure_incorrect_ga_token(self):
"""
Tests POST method on user_ga to activate a Google Authenticator
"""
secret = pyotp.random_base32()
ga = models.Google_Authenticator.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
secret = encrypt_with_db_secret(str(secret)),
active= False
)
url = reverse('user_ga')
data = {
'google_authenticator_id': ga.id,
'google_authenticator_token': '000000',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_activate_ga_failure_already_active(self):
"""
Tests POST method on user_ga to activate a Google Authenticator that is already active
"""
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
ga = models.Google_Authenticator.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
secret = encrypt_with_db_secret(str(secret)),
active= True
)
url = reverse('user_ga')
data = {
'google_authenticator_id': ga.id,
'google_authenticator_token': totp.now(),
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_activate_ga_failure_belongs_to_other_user(self):
"""
Tests POST method on user_ga to activate a Google Authenticator of another user
"""
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
ga = models.Google_Authenticator.objects.create(
user=self.test_user_obj2,
title= 'My Sweet Title',
secret = encrypt_with_db_secret(str(secret)),
active= False
)
url = reverse('user_ga')
data = {
'google_authenticator_id': ga.id,
'google_authenticator_token': totp.now(),
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_activate_ga_failure_non_digits(self):
    """
    Tests POST method on user_ga: a token containing non-digit characters
    must be rejected with 400 BAD REQUEST.
    """
    secret = pyotp.random_base32()
    ga = models.Google_Authenticator.objects.create(
        user=self.test_user_obj,
        title='My Sweet Title',
        secret=encrypt_with_db_secret(str(secret)),
        active=False,
    )

    url = reverse('user_ga')
    data = {
        'google_authenticator_id': ga.id,
        'google_authenticator_token': 'ABCDEF',  # letters, not a numeric TOTP
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.post(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_activate_ga_failure_google_auth_does_not_exist(self):
    """
    Tests POST method on user_ga: referencing a google_authenticator_id that
    does not exist must fail with 400 BAD REQUEST.
    """
    secret = pyotp.random_base32()
    totp = pyotp.TOTP(secret)

    url = reverse('user_ga')
    data = {
        # valid UUID format, but no such record was created
        'google_authenticator_id': '6ea5c814-b58f-4bbe-b93d-a3d4c31574c7',
        'google_authenticator_token': totp.now(),
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.post(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_ga(self):
    """
    Tests DELETE method on user_ga: deleting an owned Google Authenticator
    succeeds, and a subsequent GET no longer lists it.
    """
    ga = models.Google_Authenticator.objects.create(
        user=self.test_user_obj,
        title='My Sweet Title',
        secret='1234',
    )

    url = reverse('user_ga')
    data = {
        'google_authenticator_id': ga.id,
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.delete(url, data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Verify the authenticator is gone from the user's list.
    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.get(url, data)
    self.assertEqual(response.data, {
        "google_authenticators": []
    })
    self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_user_ga_no_google_authenticator_id(self):
    """
    Tests DELETE method on user_ga with no google_authenticator_id:
    the request must fail with 400 BAD REQUEST.
    """
    url = reverse('user_ga')
    data = {}  # deliberately missing 'google_authenticator_id'

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.delete(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_ga_google_authenticator_id_no_uuid(self):
    """
    Tests DELETE method on user_ga with google_authenticator_id not being a
    uuid: the request must fail with 400 BAD REQUEST.
    """
    url = reverse('user_ga')
    payload = {'google_authenticator_id': '12345'}

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.delete(url, payload)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_ga_google_authenticator_id_not_exist(self):
    """
    Tests DELETE method on user_ga with a google_authenticator_id that does
    not exist: the request must fail with 400 BAD REQUEST.
    """
    url = reverse('user_ga')
    payload = {'google_authenticator_id': '7e866c32-3e4d-4421-8a7d-3ac62f980fd3'}

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.delete(url, payload)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 35.12724
| 163
| 0.664915
| 2,316
| 19,601
| 5.325561
| 0.090674
| 0.059024
| 0.035025
| 0.034052
| 0.822847
| 0.772256
| 0.762121
| 0.735528
| 0.718096
| 0.710475
| 0
| 0.021478
| 0.239886
| 19,601
| 557
| 164
| 35.190305
| 0.806363
| 0.06515
| 0
| 0.606232
| 0
| 0
| 0.077583
| 0.04692
| 0.002833
| 0
| 0
| 0
| 0.082153
| 1
| 0.065156
| false
| 0.011331
| 0.056657
| 0
| 0.127479
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76603d7fc0f19b5da06e126c1b5a04eb16f15c9a
| 1,753
|
py
|
Python
|
tests/database/test_databaes_universe.py
|
evetrivia/thanatos
|
664c12a8ccf4d27ab0e06e0969bbb6381f74789c
|
[
"MIT"
] | 1
|
2015-08-03T14:30:18.000Z
|
2015-08-03T14:30:18.000Z
|
tests/database/test_databaes_universe.py
|
evetrivia/thanatos
|
664c12a8ccf4d27ab0e06e0969bbb6381f74789c
|
[
"MIT"
] | 14
|
2015-05-05T22:37:43.000Z
|
2015-07-31T04:45:14.000Z
|
tests/database/test_databaes_universe.py
|
evetrivia/thanatos
|
664c12a8ccf4d27ab0e06e0969bbb6381f74789c
|
[
"MIT"
] | null | null | null |
import mock
import unittest2
from thanatos.database import universe
class DatabaseUniverseTestCase(unittest2.TestCase):
    """Tests for thanatos.database.universe with execute_sql patched out."""

    def setUp(self):
        # No shared fixtures: each test builds its own mocks so no state or
        # history is carried over from test to test.
        pass

    @mock.patch('thanatos.database.universe.execute_sql')
    def test_get_all_regions(self, mock_execute_sql):
        """get_all_regions issues the stored-proc call and returns its rows."""
        connection = mock.MagicMock()
        rows = [(1, 'test')]
        mock_execute_sql.return_value = rows

        returned = universe.get_all_regions(connection)

        mock_execute_sql.assert_called_with('CALL get_all_regions();', connection)
        self.assertEqual(returned, rows)
        # The wrapped function also exposes the rows on its _results attribute.
        self.assertEqual(universe.get_all_regions._results, rows)

    @mock.patch('thanatos.database.universe.execute_sql')
    def test_get_all_not_wh_regions(self, mock_execute_sql):
        """get_all_not_wh_regions issues its stored-proc call and returns rows."""
        connection = mock.MagicMock()
        rows = [(1, 'test')]
        mock_execute_sql.return_value = rows

        returned = universe.get_all_not_wh_regions(connection)

        mock_execute_sql.assert_called_with('CALL get_all_not_wh_regions();', connection)
        self.assertEqual(returned, rows)
        self.assertEqual(universe.get_all_not_wh_regions._results, rows)

    @mock.patch('thanatos.database.universe.execute_sql')
    def test_get_all_regions_connected_to_region(self, mock_execute_sql):
        """The region id is interpolated into the stored-proc call."""
        connection = mock.MagicMock()
        rows = [(1, 'test')]
        mock_execute_sql.return_value = rows

        returned = universe.get_all_regions_connected_to_region(connection, 101)

        mock_execute_sql.assert_called_with('CALL get_all_regions_connected_to_region(101);', connection)
        self.assertEqual(returned, rows)
| 34.372549
| 113
| 0.705077
| 222
| 1,753
| 5.135135
| 0.18018
| 0.105263
| 0.110526
| 0.105263
| 0.857018
| 0.852632
| 0.803509
| 0.803509
| 0.757018
| 0.757018
| 0
| 0.011111
| 0.178551
| 1,753
| 51
| 114
| 34.372549
| 0.780556
| 0
| 0
| 0.4
| 0
| 0
| 0.141782
| 0.104167
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.133333
| false
| 0.033333
| 0.1
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7699c06fdee5b52a0fbf1178a29a246da5332e18
| 2,542
|
py
|
Python
|
courses/src/mark_app/tests.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
courses/src/mark_app/tests.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
courses/src/mark_app/tests.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
from base_app.tests import BaseTestCase
from django.urls import reverse
class MarkEndPointTestCase(BaseTestCase):
    """Endpoint tests for marks on tasks.

    Relies on BaseTestCase helpers: create_task(), auth(), post(), get()
    and the pre-registered users in self.users.
    """

    @staticmethod
    def _mark_url(view_name, course_id, lecture_id, task_statement_id, task_id, **extra):
        """Build a nested mark URL; extra kwargs (e.g. pk=...) are appended.

        Extracted because the same kwargs dict was previously duplicated in
        every test.
        """
        kwargs = {
            'course_pk': course_id,
            'lecture_pk': lecture_id,
            'task_statement_pk': task_statement_id,
            'task_pk': task_id,
        }
        kwargs.update(extra)
        return reverse(view_name, kwargs=kwargs)

    def test_add_mark_to_task(self):
        """An authenticated user can add a valid mark; the author is recorded."""
        course_id, lecture_id, task_statement_id, task_id = self.create_task()
        jwt = self.auth('qqq')
        data = {
            'mark_value': 9,
        }
        url = self._mark_url('mark-list', course_id, lecture_id, task_statement_id, task_id)

        resp, resp_data = self.post(url, data, jwt)

        assert resp.status_code == 201
        assert resp_data['mark_value'] == data['mark_value']
        assert self.users['qqq']['id'] == resp_data['author']['id']

    def test_add_mark_to_task_more_than_10(self):
        """A mark value above the allowed maximum is rejected with a validation error."""
        course_id, lecture_id, task_statement_id, task_id = self.create_task()
        jwt = self.auth('qqq')
        data = {
            'mark_value': 20,
        }
        url = self._mark_url('mark-list', course_id, lecture_id, task_statement_id, task_id)

        resp, resp_data = self.post(url, data, jwt)

        assert resp.status_code == 400
        # DRF's "Ensure this value is less than or equal to ..." message.
        assert 'ensure' in resp_data['mark_value'][0].lower()

    def test_get_mark_as_student(self):
        """A different user ('new_student_2') can read a mark created by 'qqq'."""
        course_id, lecture_id, task_statement_id, task_id = self.create_task()
        jwt = self.auth('qqq')
        data = {
            'mark_value': 9,
        }
        url = self._mark_url('mark-list', course_id, lecture_id, task_statement_id, task_id)
        resp, resp_data = self.post(url, data, jwt)

        jwt = self.auth('new_student_2')
        url = self._mark_url(
            'mark-detail', course_id, lecture_id, task_statement_id, task_id,
            pk=resp_data["id"],
        )

        resp, resp_data = self.get(url, data, jwt)

        assert resp.status_code == 200
        assert resp_data['mark_value'] == data['mark_value']
        assert self.users['qqq']['id'] == resp_data['author']['id']
| 31.775
| 78
| 0.521637
| 290
| 2,542
| 4.237931
| 0.196552
| 0.068348
| 0.084622
| 0.125305
| 0.809601
| 0.794955
| 0.762408
| 0.737998
| 0.737998
| 0.737998
| 0
| 0.010468
| 0.361133
| 2,542
| 79
| 79
| 32.177215
| 0.746305
| 0
| 0
| 0.633803
| 0
| 0
| 0.1369
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 1
| 0.042254
| false
| 0
| 0.028169
| 0
| 0.084507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4fa17151a45f6743ec328b2ce352648f51418311
| 219
|
py
|
Python
|
src/user/admin.py
|
EducationalBot/EduBotAPIServer
|
f571a0acd8e5348bba04febc17606c29511ebef7
|
[
"MIT"
] | 1
|
2021-11-08T21:25:27.000Z
|
2021-11-08T21:25:27.000Z
|
src/user/admin.py
|
EducationalBot/EduBotAPIServer
|
f571a0acd8e5348bba04febc17606c29511ebef7
|
[
"MIT"
] | null | null | null |
src/user/admin.py
|
EducationalBot/EduBotAPIServer
|
f571a0acd8e5348bba04febc17606c29511ebef7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as UserAdminInterface
from user.models import User
class UserAdmin(UserAdminInterface):
    """Django's stock user admin, reused unchanged for the custom User model."""


# Make the custom User model manageable through the admin site.
admin.site.register(User, UserAdmin)
| 19.909091
| 69
| 0.817352
| 28
| 219
| 6.392857
| 0.535714
| 0.111732
| 0.189944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 219
| 10
| 70
| 21.9
| 0.932292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.5
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4fa503fd2613b8f0ec3e467d869c251ea51b77e0
| 5,641
|
py
|
Python
|
tests/unit/series6/test_interface.py
|
n0mn0m/circuit_roomba
|
6b33c524951d348b0eba6a8e01d26d5adc2b1886
|
[
"MIT"
] | 1
|
2021-09-07T08:00:22.000Z
|
2021-09-07T08:00:22.000Z
|
tests/unit/series6/test_interface.py
|
n0mn0m/circuit_roomba
|
6b33c524951d348b0eba6a8e01d26d5adc2b1886
|
[
"MIT"
] | null | null | null |
tests/unit/series6/test_interface.py
|
n0mn0m/circuit_roomba
|
6b33c524951d348b0eba6a8e01d26d5adc2b1886
|
[
"MIT"
] | 1
|
2021-09-07T08:01:51.000Z
|
2021-09-07T08:01:51.000Z
|
import unittest
from unittest import mock
from circuitroomba.series6 import interface, opcode
class Test_interface(unittest.TestCase):
"""
Unit tests for interface.OpenInterface with busio / busio.UART mocked out.

A fresh OpenInterface is initialized in each test to prevent carrying over
any state or history from test to test.
"""
def setUp(self) -> None:
# A MagicMock stands in for the CircuitPython `board` module; tests only
# read pin attributes (RX, TX, A1) from it.
self.board = mock.MagicMock()
# Each test patches both the busio module and busio.UART so no real
# hardware/serial access happens. Decorator order: innermost patch is the
# first extra argument (uart), outermost is the second (busio).
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_history_not_available_by_default(self, uart, busio):
""" """
# Without trace=True the interface neither sets trace nor a history attr.
oi = interface.OpenInterface(self.board.RX, self.board.TX, self.board.A1)
self.assertEqual(False, oi.trace)
self.assertEqual(False, hasattr(oi, "history"))
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_valid_modes_return_only_valid_modes(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
self.assertEqual(oi.valid_modes, ("off", "safe", "passive", "full"))
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_change_operating_mode(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
oi.operating_mode = "safe"
self.assertEqual(oi.operating_mode, "safe")
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_invalid_operating_mode(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
# "kernel" is not in valid_modes, so the setter must reject it.
with self.assertRaises(RuntimeError):
oi.operating_mode = "kernel"
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_send_new_command(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
oi.command(opcode.START)
# With trace=True, the most recent command is recorded in history[0].
self.assertEqual(oi.history[0][1], opcode.START)
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_history_cannot_exceed_10(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
# 15 commands are sent, but the trace buffer is capped at 10 entries.
for i in range(15):
oi.command(opcode.START)
self.assertEqual(len(oi.history), 10)
for i in range(9):
self.assertEqual(oi.history[i], ("passive", opcode.START, b"\x00"))
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_send_invalid_command(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
# A string that is not a known opcode raises KeyError from the lookup.
with self.assertRaises(KeyError):
oi.command("0x10")
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_send_invalid_command_for_current_operating_mode(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
# STOP/BAUD are not legal before START, so a RuntimeError is expected.
with self.assertRaises(RuntimeError):
oi.command(opcode.STOP)
oi.command(opcode.BAUD, 11)
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_send_new_command_with_data(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
oi.wake_up()
oi.command(opcode.START)
oi.command(opcode.BAUD, 11)
# History is newest-first: BAUD (with data byte 0x0b) precedes START.
self.assertEqual(oi.history[1], ("passive", opcode.START, b"\x00"))
self.assertEqual(oi.history[0], (None, opcode.BAUD, b"\x0b"))
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_send_new_command_with_invalid_data(self, uart, busio):
""" """
oi = interface.OpenInterface(
self.board.RX, self.board.TX, self.board.A1, trace=True
)
# RESET takes no data byte, so passing 11 must be rejected.
with self.assertRaises(RuntimeError):
oi.command(opcode.RESET, 11)
@mock.patch("circuitroomba.series6.interface.busio", return_value=mock.MagicMock())
@mock.patch(
"circuitroomba.series6.interface.busio.UART", return_value=mock.MagicMock()
)
def test_keep_awake_is_not_available(self, uart, busio):
""" """
oi = interface.OpenInterface(self.board.RX, self.board.TX, self.board.A1)
with self.assertRaises(NotImplementedError):
oi.keep_awake()
| 38.114865
| 87
| 0.669208
| 677
| 5,641
| 5.45938
| 0.138848
| 0.082792
| 0.130952
| 0.172619
| 0.798431
| 0.766234
| 0.747836
| 0.747836
| 0.744318
| 0.744318
| 0
| 0.013102
| 0.201737
| 5,641
| 147
| 88
| 38.37415
| 0.807684
| 0.018082
| 0
| 0.516949
| 0
| 0
| 0.169958
| 0.157456
| 0
| 0
| 0.000725
| 0
| 0.118644
| 1
| 0.101695
| false
| 0.025424
| 0.025424
| 0
| 0.135593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4fa623a4a1b533bfbc6adede7e167575569a86fc
| 160
|
py
|
Python
|
src/tasks/lesson03/domzad_three.py
|
vadimkondratovich/asd
|
5f2db494f739ea663795c5d4a924ced942cb1852
|
[
"MIT"
] | null | null | null |
src/tasks/lesson03/domzad_three.py
|
vadimkondratovich/asd
|
5f2db494f739ea663795c5d4a924ced942cb1852
|
[
"MIT"
] | 8
|
2021-01-10T09:38:54.000Z
|
2021-02-28T12:33:58.000Z
|
src/tasks/lesson03/domzad_three.py
|
vadimkondratovich/asd
|
5f2db494f739ea663795c5d4a924ced942cb1852
|
[
"MIT"
] | null | null | null |
# Read a line and report on its length relative to 5 characters.
word = input("Введите строку: ")

if len(word) > 5:
    # Longer than 5: print the actual length.
    print(len(word))
elif len(word) < 5:
    print("Need more!")
else:
    # The only remaining case is len(word) == 5, so a redundant
    # `elif len(word) == 5` check is replaced by a plain else.
    print("It's five")
| 20
| 32
| 0.58125
| 26
| 160
| 3.576923
| 0.538462
| 0.301075
| 0.258065
| 0.419355
| 0.365591
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.2125
| 160
| 7
| 33
| 22.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4fa81748e8fcca6150114323a53827735b187c74
| 797
|
py
|
Python
|
2-aml-pytorch-samples/download-dataset.py
|
szjarek/articles-AzureML-with-Python
|
d414a898855e74b79cdf0d06ded3d31844197aed
|
[
"MIT"
] | null | null | null |
2-aml-pytorch-samples/download-dataset.py
|
szjarek/articles-AzureML-with-Python
|
d414a898855e74b79cdf0d06ded3d31844197aed
|
[
"MIT"
] | null | null | null |
2-aml-pytorch-samples/download-dataset.py
|
szjarek/articles-AzureML-with-Python
|
d414a898855e74b79cdf0d06ded3d31844197aed
|
[
"MIT"
] | null | null | null |
import os
import urllib.parse
import urllib.request

# Local folder the four MNIST files are downloaded into.
DATA_FOLDER = 'datasets/mnist-data'
# Azure Open Datasets mirror of MNIST (trailing slash required for urljoin).
DATASET_BASE_URL = 'https://azureopendatastorage.blob.core.windows.net/mnist/'

os.makedirs(DATA_FOLDER, exist_ok=True)

# (remote file name, local file name) pairs; "t10k" is MNIST's test split.
_FILES = [
    ('train-images-idx3-ubyte.gz', 'train-images.gz'),
    ('train-labels-idx1-ubyte.gz', 'train-labels.gz'),
    ('t10k-images-idx3-ubyte.gz', 'test-images.gz'),
    ('t10k-labels-idx1-ubyte.gz', 'test-labels.gz'),
]

for remote_name, local_name in _FILES:
    # Bug fix: os.path.join must not be used to build URLs — on Windows it
    # inserts backslashes, producing an invalid URL. urljoin is correct on
    # every platform.
    urllib.request.urlretrieve(
        urllib.parse.urljoin(DATASET_BASE_URL, remote_name),
        filename=os.path.join(DATA_FOLDER, local_name))
| 39.85
| 78
| 0.757842
| 119
| 797
| 4.932773
| 0.285714
| 0.081772
| 0.136286
| 0.177172
| 0.725724
| 0.725724
| 0.725724
| 0.725724
| 0.725724
| 0.69506
| 0
| 0.010944
| 0.082811
| 797
| 20
| 79
| 39.85
| 0.792066
| 0
| 0
| 0.235294
| 0
| 0
| 0.295739
| 0.12782
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96ca5bcdb4886e8e3e0e186cb8a1432ed7d3da93
| 13,523
|
py
|
Python
|
transfer.py
|
BboyHanat/Real-Time-Video-Transfer
|
9fd2ecfcb1495b0c28a43a0c9e5de2228690975c
|
[
"MIT"
] | 2
|
2019-12-03T13:46:09.000Z
|
2022-02-12T02:27:05.000Z
|
transfer.py
|
BboyHanat/Real-Time-Video-Transfer
|
9fd2ecfcb1495b0c28a43a0c9e5de2228690975c
|
[
"MIT"
] | 1
|
2022-01-16T08:50:01.000Z
|
2022-01-16T08:50:01.000Z
|
transfer.py
|
BboyHanat/Real-Time-Video-Transfer
|
9fd2ecfcb1495b0c28a43a0c9e5de2228690975c
|
[
"MIT"
] | null | null | null |
import logging
from logging.handlers import TimedRotatingFileHandler
import os
import torch.optim as optim
from torchvision import transforms
from PIL import Image
from transform_net import TransformNet
from style_network import *
from loss_network import *
from dataset import get_loader, get_image_loader
from opticalflow import opticalflow
import cv2
# Shorthand alias used for path manipulation in this module.
osp = os.path

# Rotate the training log every 4 weeks ("w1" = weekly on Monday),
# keeping the 12 most recent backups.
trHandler = TimedRotatingFileHandler("train_log.log", when="w1", interval=4, backupCount=12)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d:%(filename)-12s[%(lineno)4d] %(levelname)-6s %(message)s',
                              '%Y-%m-%d %H:%M:%S')
level = logging.DEBUG

trHandler.setFormatter(formatter)
trHandler.setLevel(level)

logger = logging.getLogger()
logger.addHandler(trHandler)
# Bug fix: the root logger defaults to WARNING, so without lowering its
# level every logger.info(...) call in this module was silently discarded
# even though the handler itself accepted DEBUG records.
logger.setLevel(level)
class Transfer:
"""Video style transfer trainer.

Optimizes StyleNet against content/style/TV losses computed via LossNet,
plus a temporal loss driven by optical flow between consecutive frames.
Writes preview images to output/ and checkpoints to model/ as it trains.
"""
def __init__(self, epoch, data_path, style_path, vgg_path, lr, spatial_a, spatial_b, spatial_r, temporal_lambda, gpu=False, img_shape=(640, 360)):
# spatial_a/b/r weight content/style/TV losses; temporal_lambda weights
# the temporal loss. img_shape is (width, height) passed to PIL resize.
self.epoch = epoch
self.data_path = data_path
self.style_path = style_path
self.lr = lr
self.gpu = gpu
self.s_a = spatial_a
self.s_b = spatial_b
self.s_r = spatial_r
self.t_l = temporal_lambda
self.style_net = StyleNet()
self.loss_net = LossNet(vgg_path)
# VGG layers used for the style representation.
self.style_layer = ['conv1_2', 'conv2_2', 'conv3_4', 'conv4_4']
# ImageNet mean/std normalization applied to every input frame.
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
self.img_shape = img_shape
def load_style(self):
"""Load the style image, scale to [0, 1], normalize, and return it as a
grad-enabled Variable with a leading batch dimension of 1."""
img = Image.open(self.style_path)
img = img.resize(self.img_shape)
img = np.asarray(img, np.float32)/255.0
img = self.transform(img)
img = img.unsqueeze(0)
img = Variable(img, requires_grad=True)
return img
def train(self):
"""Run the training loop over video frame sequences from data_path."""
style_img = self.load_style()
if self.gpu:
self.style_net = self.style_net.cuda()
self.loss_net = self.loss_net.cuda()
style_img = style_img.cuda()
# NOTE(review): three optimizers are constructed but only adadelta.step()
# is ever called below; adam and sgd are dead code. No zero_grad() is
# called anywhere, so gradients accumulate across steps — verify intended.
adam = optim.Adam(self.style_net.parameters(), lr=self.lr)
sgd = optim.SGD(self.style_net.parameters(), lr=self.lr, momentum=0.9)
adadelta = optim.Adadelta(self.style_net.parameters(), lr=self.lr)
loader = get_loader(1, self.data_path, self.img_shape, self.transform)
logger.info('Data Load Success!!')
print('Data Load Success!!')
logger.info('Training Start!!')
print('Training Start!!')
for count in range(self.epoch):
for step, frames in enumerate(loader):
logger.info('step {}'.format(str(step)))
# Process consecutive frame pairs (x_t1 = previous frame, x_t = current).
for i in range(1, len(frames)):
x_t = frames[i]
x_t1 = frames[i-1]
if self.gpu:
x_t = x_t.cuda()
x_t1 = x_t1.cuda()
# Stylized outputs for both frames.
h_xt = self.style_net(x_t)
h_xt1 = self.style_net(x_t1)
# VGG feature maps for inputs, outputs, and the style target.
s_xt = self.loss_net(x_t, self.style_layer)
s_xt1 = self.loss_net(x_t1, self.style_layer)
s_hxt = self.loss_net(h_xt, self.style_layer)
s_hxt1 = self.loss_net(h_xt1, self.style_layer)
s = self.loss_net(style_img, self.style_layer)
# ContentLoss, conv4_2
content_t = ContentLoss(self.gpu)(s_xt[3], s_hxt[3])
content_t1 = ContentLoss(self.gpu)(s_xt1[3], s_hxt1[3])
content_loss = content_t + content_t1
# StyleLoss
style_t = StyleLoss(self.gpu)(s[0], s_hxt[0])
style_t1 = StyleLoss(self.gpu)(s[0], s_hxt1[0])
for layer in range(1, len(self.style_layer)):
style_t += StyleLoss(self.gpu)(s[layer], s_hxt[layer])
style_t1 += StyleLoss(self.gpu)(s[layer], s_hxt1[layer])
# TVLoss
tv_loss = TVLoss()(s_hxt[3])
style_loss = style_t + style_t1
if self.gpu:
flow, mask = opticalflow(h_xt1.data.cpu().numpy(), h_xt.data.cpu().numpy())
# Optical flow
else:
flow, mask = opticalflow(h_xt1.data.numpy(), h_xt.data.numpy())
if self.gpu:
flow = flow.cuda()
mask = mask.cuda()
# Temporal Loss
temporal_loss = TemporalLoss(self.gpu)(h_xt, flow, mask)
# Spatial Loss
spatial_loss = self.s_a * content_loss + self.s_b * style_loss + self.s_r * tv_loss
print('content_loss is {}, style_loss is {}, tv_loss is {}'.format(self.s_a * content_loss, self.s_b * style_loss, self.s_r * tv_loss))
# NOTE(review): only content_loss is actually optimized here; the full
# spatial + temporal objective is computed/logged but commented out.
Loss = content_loss # spatial_loss + self.t_l * temporal_loss
Loss.backward(retain_graph=True)
adadelta.step()
logger.info('Loss is: {}, spatial_loss is: {}, temporal_loss is: {}, step: {} frame {}'.format(str(Loss), str(spatial_loss), str(temporal_loss), str(step), str(i)))
print('Loss is: {}, spatial_loss is: {}, temporal_loss is: {}, step: {} frame {}'.format(str(Loss), str(spatial_loss), str(temporal_loss), str(step), str(i)))
# Periodically dump side-by-side previews of input and stylized frame.
if i % 300 == 0 and i >= 300:
s_np_image = x_t.data.cpu().numpy()
s_np_image = np.squeeze(np.transpose(s_np_image, (0, 2, 3, 1)))
# Undo the ImageNet normalization before writing the source preview.
transform_np_s = (s_np_image * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255
transform_np_s = transform_np_s.clip(0, 255)
s_np_image = np.asarray(transform_np_s, np.uint8)
s_np_image = cv2.cvtColor(s_np_image, cv2.COLOR_RGB2BGR)
np_image = h_xt.data.cpu().numpy()
np_image = np.squeeze(np.transpose(np_image, (0, 2, 3, 1)))
# transform_np = (np_image * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255
# Stylized output is mapped from [-1, 1] to [0, 255] — assumes a tanh-like
# output range from StyleNet; TODO confirm.
transform_np = (np_image + 1) * 127.5
transform_np = transform_np.clip(0, 255)
np_image = np.asarray(transform_np, np.uint8)
np_image = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
cv2.imwrite('output/style_e{}_s{}_i{}.jpg'.format(count, step, i), np_image)
cv2.imwrite('output/source_e{}_s{}_i{}.jpg'.format(count, step, i), s_np_image)
logger.info('model saving')
print('model saving')
# Checkpoint once per epoch.
torch.save(self.style_net.state_dict(), 'model/style_model_epoch_{}.pth'.format(count))
logger.info('model save finish')
print('model save finish')
class ImageTransfer:
"""Single-image style transfer trainer.

Same structure as Transfer, but works on batches of still images from
get_image_loader and uses no temporal/optical-flow loss.
"""
def __init__(self, epoch, data_path, style_path, vgg_path, lr, spatial_a, spatial_b, spatial_r, temporal_lambda, gpu=False, img_shape=(640, 360)):
# temporal_lambda is accepted for interface parity with Transfer but is
# unused in this class's training loop.
self.epoch = epoch
self.data_path = data_path
self.style_path = style_path
self.lr = lr
self.gpu = gpu
self.s_a = spatial_a
self.s_b = spatial_b
self.s_r = spatial_r
self.t_l = temporal_lambda
self.style_net = StyleNet()
self.loss_net = LossNet(vgg_path)
# VGG layers used for the style representation.
self.style_layer = ['conv1_2', 'conv2_2', 'conv3_4', 'conv4_4']
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
self.img_shape = img_shape
def load_style(self):
"""Load the style image, scale to [0, 1], normalize, and return it as a
grad-enabled Variable with a leading batch dimension of 1."""
img = Image.open(self.style_path)
img = img.resize(self.img_shape)
img = np.asarray(img, np.float32) / 255.0
img = self.transform(img)
img = img.unsqueeze(0)
img = Variable(img, requires_grad=True)
return img
def train(self):
"""Run the training loop over image batches from data_path."""
style_img = self.load_style()
if self.gpu:
self.style_net = self.style_net.cuda()
self.loss_net = self.loss_net.cuda()
style_img = style_img.cuda()
# NOTE(review): three optimizers are constructed but only sgd.step() is
# called below; adam and adadelta are dead code. No zero_grad() is called,
# so gradients accumulate across steps — verify intended.
adam = optim.Adam(self.style_net.parameters(), lr=self.lr)
sgd = optim.SGD(self.style_net.parameters(), lr=self.lr, momentum=0.9)
adadelta = optim.Adadelta(self.style_net.parameters(), lr=self.lr)
# Batch size 4, images only (no frame sequences).
loader = get_image_loader(4, self.data_path, self.img_shape, self.transform)
logger.info('Data Load Success!!')
print('Data Load Success!!')
logger.info('Training Start!!')
print('Training Start!!')
for count in range(self.epoch):
for step, frames in enumerate(loader):
x_t = frames[0]
if self.gpu:
x_t = x_t.cuda()
h_xt = self.style_net(x_t)
# VGG features for input, stylized output, and style target.
s_xt = self.loss_net(x_t, self.style_layer)
s_hxt = self.loss_net(h_xt, self.style_layer)
s = self.loss_net(style_img, self.style_layer)
# ContentLoss, conv4_2
content_loss = ContentLoss(self.gpu)(s_xt[3], s_hxt[3])
#content_loss = ContentLoss(self.gpu)(x_t, h_xt)
# StyleLoss
style_loss = StyleLoss(self.gpu)(s[0], s_hxt[0])
for layer in range(1, len(self.style_layer)):
style_loss += StyleLoss(self.gpu)(s[layer], s_hxt[layer])
# TVLoss
tv_loss = TVLoss()(h_xt)
# Spatial Loss
spatial_loss = self.s_a * content_loss + self.s_r * tv_loss + self.s_b * style_loss
print('content_loss is {}, style_loss is {}, tv_loss is {}'.format(self.s_a * content_loss, self.s_b * style_loss, self.s_r * tv_loss))
Loss = torch.mean(spatial_loss) # spatial_loss + self.t_l * temporal_loss
Loss.backward(retain_graph=True)
sgd.step()
logger.info('Loss is: {}, spatial_loss is: {} step: {} '.format(str(Loss), str(spatial_loss), str(step)))
print('Loss is: {}, spatial_loss is: {}, step: {}'.format(str(Loss), str(spatial_loss), str(step)))
# Periodically dump previews of the first image in the batch.
if step % 70 == 0 and step >= 70:
s_np_image = x_t.data.cpu().numpy()
s_np_image = np.squeeze(np.transpose(s_np_image, (0, 2, 3, 1))[0, :, :, :])
# Undo the ImageNet normalization before writing the source preview.
transform_np_s = (s_np_image * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255
transform_np_s = transform_np_s.clip(0, 255)
s_np_image = np.asarray(transform_np_s, np.uint8)
s_np_image = cv2.cvtColor(s_np_image, cv2.COLOR_RGB2BGR)
np_image = h_xt.data.cpu().numpy()
np_image = np.squeeze(np.transpose(np_image, (0, 2, 3, 1))[0,:,:,:])
# transform_np = (np_image * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255
# Stylized output mapped from [-1, 1] to [0, 255] — assumes a tanh-like
# output range from StyleNet; TODO confirm.
transform_np = (np_image + 1) * 127.5
transform_np = transform_np.clip(0, 255)
np_image = np.asarray(transform_np, np.uint8)
np_image = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
cv2.imwrite('output/style_e{}_s{}.jpg'.format(count, step), np_image)
cv2.imwrite('output/source_e{}_s{}.jpg'.format(count, step), s_np_image)
logger.info('model saving')
print('model saving')
# Checkpoint once per epoch.
torch.save(self.style_net.state_dict(), 'model/style_model_epoch_{}.pth'.format(count))
logger.info('model save finish')
print('model save finish')
# NOTE(review): '__main1__' can never equal __name__, so this video-transfer
# entry point is deliberately disabled; the image entry point below runs
# instead when the file is executed directly.
if __name__ == '__main1__':
# transfer = Transfer(10, 'data', '1.jpeg', 'model/vgg19-dcbb9e9d.pth', 0.1, 0.3, 0.3, 0.1, 0.2, gpu=False, img_shape=(480, 320))
transfer = Transfer(10,
'/data/User/杨远东/登峰造极/视频素材',
'data/1.jpg',
'model/vgg19-dcbb9e9d.pth',
lr=0.001,
spatial_a=1,
spatial_b=0.00001,
spatial_r=0.000001,
temporal_lambda=10000,
gpu=True,
img_shape=(640, 360))
transfer.train()
# Active entry point: trains single-image style transfer on a local dataset
# of building photos with a hard-coded style image and VGG weights path.
if __name__ == '__main__':
# transfer = ImageTransfer(10, 'data/PNG', '1.jpeg', 'model/vgg19-dcbb9e9d.pth',
# lr=0.001,spatial_a=1,spatial_b=0.00001,spatial_r=0.000001,temporal_lambda=10000,
# gpu=False,
# img_shape=(640, 360))
transfer = ImageTransfer(100,
'/data/User/杨远东/登峰造极/图片素材/buildings',
'data/1.jpg',
'model/vgg19-dcbb9e9d.pth',
lr=0.01,
spatial_a=1,
spatial_b=0.00001,
spatial_r=0.00001,
temporal_lambda=10000,
gpu=True,
img_shape=(640, 360))
transfer.train()
| 44.778146
| 184
| 0.527915
| 1,718
| 13,523
| 3.926077
| 0.121653
| 0.046701
| 0.030245
| 0.007116
| 0.810378
| 0.795997
| 0.759822
| 0.759822
| 0.71401
| 0.707784
| 0
| 0.052745
| 0.349479
| 13,523
| 302
| 185
| 44.778146
| 0.713993
| 0.059972
| 0
| 0.637555
| 0
| 0.004367
| 0.084463
| 0.025528
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026201
| false
| 0
| 0.052402
| 0
| 0.09607
| 0.052402
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96cdbbf381a4a8d78d2d713ca51f6d9fc07c348b
| 38
|
py
|
Python
|
vnpy_sgit/gateway/__init__.py
|
Edanflame/vnpy_sgit
|
818d5d29db88ce5388bda755ecab78aafb617849
|
[
"MIT"
] | null | null | null |
vnpy_sgit/gateway/__init__.py
|
Edanflame/vnpy_sgit
|
818d5d29db88ce5388bda755ecab78aafb617849
|
[
"MIT"
] | 3
|
2021-11-05T00:27:18.000Z
|
2021-12-06T02:47:03.000Z
|
vnpy_sgit/gateway/__init__.py
|
Edanflame/vnpy_sgit
|
818d5d29db88ce5388bda755ecab78aafb617849
|
[
"MIT"
] | 2
|
2021-10-09T02:13:48.000Z
|
2021-10-19T02:41:15.000Z
|
from .sgit_gateway import SgitGateway
| 19
| 37
| 0.868421
| 5
| 38
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96d860959e1bb2251eb2b943c5a9de153e5287a9
| 150
|
py
|
Python
|
zunzun/http_kernel/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
zunzun/http_kernel/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
zunzun/http_kernel/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
from .kernel import HttpKernel # noqa
from .router import Router # noqa
from .request import Request # noqa
from .response import Response # noqa
| 30
| 38
| 0.76
| 20
| 150
| 5.7
| 0.4
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186667
| 150
| 4
| 39
| 37.5
| 0.934426
| 0.126667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4fcb5159644c85b3569fb844cebc4d82cb2b1757
| 5,077
|
py
|
Python
|
tests/cli/config/test_config_fix_versions.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
tests/cli/config/test_config_fix_versions.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
tests/cli/config/test_config_fix_versions.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
from statue.cli.cli import statue_cli
from tests.constants import COMMAND1, COMMAND2
from tests.util import command_builder_mock, dummy_version, dummy_versions
def test_config_fix_version_with_no_installed_packages(
    cli_runner, mock_build_configuration_from_file, mock_configuration_path
):
    """Fixing versions when nothing is installed leaves every builder's version unset."""
    builder_a = command_builder_mock(name=COMMAND1, installed=False)
    builder_b = command_builder_mock(name=COMMAND2, installed=False)
    config = mock_build_configuration_from_file.return_value
    config.commands_repository.add_command_builders(builder_a, builder_b)

    outcome = cli_runner.invoke(statue_cli, ["config", "fix-versions"])

    # The configuration is still written back, but no version gets pinned.
    config.to_toml.assert_called_once_with(mock_configuration_path.return_value)
    assert outcome.exit_code == 0
    assert builder_a.version is None
    assert builder_b.version is None
def test_config_fix_version_with_one_installed_package(
    cli_runner,
    mock_build_configuration_from_file,
    mock_configuration_path,
):
    """Only the installed package gets its version fixed; the other stays unset."""
    installed_version = dummy_version()
    installed_builder = command_builder_mock(
        name=COMMAND1, installed=True, installed_version=installed_version
    )
    missing_builder = command_builder_mock(name=COMMAND2, installed=False)
    config = mock_build_configuration_from_file.return_value
    config.commands_repository.add_command_builders(installed_builder, missing_builder)

    outcome = cli_runner.invoke(statue_cli, ["config", "fix-versions"])

    config.to_toml.assert_called_once_with(mock_configuration_path.return_value)
    assert outcome.exit_code == 0
    assert installed_builder.version == installed_version
    assert missing_builder.version is None
def test_config_fix_version_with_two_installed_packages(
    cli_runner,
    mock_build_configuration_from_file,
    mock_configuration_path,
):
    """When both packages are installed, both builders get their versions fixed."""
    first_version, second_version = dummy_versions(2)
    first_builder = command_builder_mock(
        name=COMMAND1, installed=True, installed_version=first_version
    )
    second_builder = command_builder_mock(
        name=COMMAND2, installed=True, installed_version=second_version
    )
    config = mock_build_configuration_from_file.return_value
    config.commands_repository.add_command_builders(first_builder, second_builder)

    outcome = cli_runner.invoke(statue_cli, ["config", "fix-versions"])

    config.to_toml.assert_called_once_with(mock_configuration_path.return_value)
    assert outcome.exit_code == 0
    assert first_builder.version == first_version
    assert second_builder.version == second_version
def test_config_fix_version_with_no_commands(
    cli_runner,
    mock_configuration_path,
    mock_build_configuration_from_file,
):
    """Running "config fix-versions" with an empty commands repository succeeds
    without rewriting the configuration file."""
    result = cli_runner.invoke(statue_cli, ["config", "fix-versions"])
    configuration = mock_build_configuration_from_file.return_value
    configuration.to_toml.assert_not_called()
    # Typo fix: the failure message previously read "Existed with failure code";
    # "Exited" matches the wording used elsewhere in this module.
    assert (
        result.exit_code == 0
    ), f"Exited with failure code and exception: {result.exception}"
def test_config_fix_version_latest(
    cli_runner,
    mock_build_configuration_from_file,
    mock_configuration_path,
):
    """--latest triggers an update on every builder before versions are fixed."""
    first_version, second_version = dummy_versions(2)
    first_builder = command_builder_mock(
        name=COMMAND1, installed=True, installed_version=first_version
    )
    second_builder = command_builder_mock(
        name=COMMAND2, installed=True, installed_version=second_version
    )
    config = mock_build_configuration_from_file.return_value
    config.commands_repository.add_command_builders(first_builder, second_builder)

    outcome = cli_runner.invoke(statue_cli, ["config", "fix-versions", "--latest"])

    config.to_toml.assert_called_once_with(mock_configuration_path.return_value)
    first_builder.update.assert_called_once()
    second_builder.update.assert_called_once()
    assert outcome.exit_code == 0
    assert first_builder.version == first_version
    assert second_builder.version == second_version
def test_config_fix_version_with_configuration_path(
    cli_runner,
    mock_build_configuration_from_file,
    tmp_path,
    mock_configuration_path,
):
    """An explicitly passed --config path is the file that gets rewritten."""
    explicit_config = tmp_path / "statue.toml"
    explicit_config.touch()
    fixed_version = dummy_version()
    installed_builder = command_builder_mock(
        name=COMMAND1, installed=True, installed_version=fixed_version
    )
    missing_builder = command_builder_mock(name=COMMAND2, installed=False)
    config = mock_build_configuration_from_file.return_value
    config.commands_repository.add_command_builders(installed_builder, missing_builder)

    outcome = cli_runner.invoke(
        statue_cli, ["config", "fix-versions", "--config", str(explicit_config)]
    )

    # The explicit path wins over the default configuration path fixture.
    config.to_toml.assert_called_once_with(explicit_config)
    assert (
        outcome.exit_code == 0
    ), f"Exited with error code and exception: {outcome.exception}"
    assert installed_builder.version == fixed_version
    assert missing_builder.version is None
| 36.007092
| 88
| 0.771125
| 602
| 5,077
| 6.081395
| 0.116279
| 0.065556
| 0.072111
| 0.085223
| 0.874351
| 0.842939
| 0.830921
| 0.801147
| 0.801147
| 0.786397
| 0
| 0.016302
| 0.154225
| 5,077
| 140
| 89
| 36.264286
| 0.836283
| 0
| 0
| 0.695652
| 0
| 0
| 0.049242
| 0
| 0
| 0
| 0
| 0
| 0.208696
| 1
| 0.052174
| false
| 0
| 0.026087
| 0
| 0.078261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b25c360fd0fe4bc747eb537a0f93f86c3101b72
| 32
|
py
|
Python
|
geosearch/__init__.py
|
shansixiong/geosearch
|
cd1eade5309d59a0a97cae92c4e55e7428d5aa32
|
[
"MIT"
] | 4
|
2020-09-23T11:25:48.000Z
|
2022-01-02T19:03:27.000Z
|
geosearch/__init__.py
|
shansixiong/geosearch
|
cd1eade5309d59a0a97cae92c4e55e7428d5aa32
|
[
"MIT"
] | 1
|
2018-04-06T13:07:31.000Z
|
2018-04-06T13:07:31.000Z
|
geosearch/__init__.py
|
shansixiong/geosearch
|
cd1eade5309d59a0a97cae92c4e55e7428d5aa32
|
[
"MIT"
] | null | null | null |
from .geosearch import geoSearch
| 32
| 32
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c6dae23c3ea9a8f7c91382c0e7764860d27cc8f
| 34
|
py
|
Python
|
source/sagemaker/sagemaker_graph_fraud_detection/container_build/__init__.py
|
awslabs/sagemaker-graph-fraud-detection
|
35e4203dd6ec7298c12361140013b487765cbd11
|
[
"Apache-2.0"
] | 60
|
2020-04-15T22:34:14.000Z
|
2022-03-31T18:04:19.000Z
|
source/sagemaker/sagemaker_graph_fraud_detection/container_build/__init__.py
|
sojiadeshina/sagemaker-graph-fraud-detection
|
1a6fd57c32dea104cd26be3352494adbb8fcb0b5
|
[
"Apache-2.0"
] | null | null | null |
source/sagemaker/sagemaker_graph_fraud_detection/container_build/__init__.py
|
sojiadeshina/sagemaker-graph-fraud-detection
|
1a6fd57c32dea104cd26be3352494adbb8fcb0b5
|
[
"Apache-2.0"
] | 23
|
2020-05-15T15:30:56.000Z
|
2022-02-25T20:30:52.000Z
|
from .container_build import build
| 34
| 34
| 0.882353
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ca9598fa1f841eb69865c71500e5f94509dd198
| 181
|
py
|
Python
|
python_cloud_system/mysite/helloapp/views.py
|
MiracleWong/MoocStudy
|
e22c6e69b77b98b6d71b52d90321aa442d726ffa
|
[
"MIT"
] | null | null | null |
python_cloud_system/mysite/helloapp/views.py
|
MiracleWong/MoocStudy
|
e22c6e69b77b98b6d71b52d90321aa442d726ffa
|
[
"MIT"
] | null | null | null |
python_cloud_system/mysite/helloapp/views.py
|
MiracleWong/MoocStudy
|
e22c6e69b77b98b6d71b52d90321aa442d726ffa
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def hello(request):
    """Respond to any request with a fixed plain-text greeting."""
    greeting = "Hello World, I'm coming soon ..."
    return HttpResponse(greeting)
| 22.625
| 59
| 0.756906
| 25
| 181
| 5.48
| 0.8
| 0.145985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154696
| 181
| 7
| 60
| 25.857143
| 0.895425
| 0.127072
| 0
| 0
| 0
| 0
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8cb9440f206ea0ebce437e84fc467a55077b587a
| 196
|
py
|
Python
|
Temp.py
|
AlPus108/Python_lessons
|
0e96117d9a8b76fd651e137fc126ddedaa6accd9
|
[
"MIT"
] | null | null | null |
Temp.py
|
AlPus108/Python_lessons
|
0e96117d9a8b76fd651e137fc126ddedaa6accd9
|
[
"MIT"
] | null | null | null |
Temp.py
|
AlPus108/Python_lessons
|
0e96117d9a8b76fd651e137fc126ddedaa6accd9
|
[
"MIT"
] | null | null | null |
# Demonstrate manual iteration over a list via an explicit iterator object.
number_list = [1, 2, 3, 4, 5]
number_list_iterator = iter(number_list)
# Prefer the builtin next() over calling the __next__ dunder directly.
print(next(number_list_iterator))
print(next(number_list_iterator))
print(next(number_list_iterator))
# NOTE(review): the file originally ended with a dangling `adult =` statement,
# which is a SyntaxError; it has been removed.
| 19.6
| 40
| 0.770408
| 29
| 196
| 4.586207
| 0.413793
| 0.451128
| 0.541353
| 0.345865
| 0.443609
| 0.443609
| 0
| 0
| 0
| 0
| 0
| 0.028409
| 0.102041
| 196
| 10
| 41
| 19.6
| 0.727273
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8cebda1dada5940d93276ad6a72b2112f51c79db
| 19,604
|
py
|
Python
|
skills/dff_gaming_skill/dialogflows/flows/gaming/flow.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 34
|
2021-08-18T14:51:44.000Z
|
2022-03-10T14:14:48.000Z
|
skills/dff_gaming_skill/dialogflows/flows/gaming/flow.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 27
|
2021-08-30T14:42:09.000Z
|
2022-03-17T22:11:45.000Z
|
skills/dff_gaming_skill/dialogflows/flows/gaming/flow.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 40
|
2021-08-22T07:13:32.000Z
|
2022-03-29T11:45:32.000Z
|
# %%
# Gaming-skill dialogflow definition: declaratively wires user/system state
# transitions for the "gaming" flow and hands control to the Minecraft flow
# where relevant. All heavy lifting lives in the imported intents/nlg modules.
import logging
import os
from functools import partial
import sentry_sdk
from dff import dialogflow_extension
import common.dialogflow_framework.utils.state as state_utils
import dialogflows.common.shared_memory_ops as gaming_memory
import dialogflows.flows.gaming.intents as gaming_intents
import dialogflows.flows.gaming.nlg as gaming_nlg
import dialogflows.scopes as scopes
from dialogflows.common.intents import (
    LogicalOr,
    user_doesnt_say_yes_request,
    user_says_anything_request,
    user_says_yes_request,
)
from dialogflows.common.nlg import error_response, link_to_other_skills_response
from dialogflows.flows.gaming.states import State as GamingState
from dialogflows.flows.minecraft.intents import is_game_candidate_minecraft, is_minecraft_mentioned_in_user_or_bot_uttr
from dialogflows.flows.minecraft.states import State as MinecraftState
# Errors are reported to Sentry; the DSN is taken from the environment.
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"))
logger = logging.getLogger(__name__)
##################################################################################################################
# error
##################################################################################################################
##################################################################################################################
##################################################################################################################
# linking
##################################################################################################################
##################################################################################################################
# The flow starts in GamingState.USR_START.
simplified_dialogflow = dialogflow_extension.DFEasyFilling(GamingState.USR_START)
##################################################################################################################
# GLOBAL
# From any state, hand off to the Minecraft flow when the user asks to discuss Minecraft.
simplified_dialogflow.add_global_user_serial_transitions(
    {(scopes.MINECRAFT, MinecraftState.USR_START): gaming_intents.user_wants_to_discuss_minecraft_request}
)
##################################################################################################################
# START
# ######### transition State.USR_START -> State.SYS_HI if hi_request==True (request returns only bool values) ####
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_START,
    {
        GamingState.SYS_USER_MAYBE_WANTS_TO_TALK_ABOUT_PARTICULAR_GAME: gaming_intents.user_maybe_wants_to_talk_about_particular_game_request,
        GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_BOT_NEVER_PLAYED: partial(
            gaming_intents.user_definitely_wants_to_talk_about_particular_game_request,
            additional_check=lambda n, v: not is_minecraft_mentioned_in_user_or_bot_uttr(n, v),
        ),
        GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_THAT_USER_PLAYED_AND_BOT_DIDNT_PLAY: partial(
            gaming_intents.user_definitely_wants_to_talk_about_game_that_user_played_request,
            additional_check=lambda n, v: not is_minecraft_mentioned_in_user_or_bot_uttr(n, v),
        ),
        GamingState.SYS_USER_DOESNT_LIKE_GAMING: gaming_intents.user_doesnt_like_gaming_request,
        GamingState.SYS_USER_DIDNT_NAME_GAME: LogicalOr(
            gaming_intents.user_didnt_name_game_after_question_about_games_and_didnt_refuse_to_discuss_request,
            partial(gaming_intents.user_mentioned_games_as_his_interest_request, first_time=False),
        ),
        GamingState.SYS_USER_MENTIONED_GAMES_AS_HIS_INTEREST: gaming_intents.user_mentioned_games_as_his_interest_request,
    },
)
# ######### if all *_request==False then transition State.USR_START -> State.SYS_ERR #########
simplified_dialogflow.set_error_successor(GamingState.USR_START, GamingState.SYS_ERR)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_MENTIONED_GAMES_AS_HIS_INTEREST,
    (scopes.MAIN, scopes.State.USR_ROOT),
    gaming_nlg.ask_what_game_user_likes_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_MENTIONED_GAMES_AS_HIS_INTEREST, GamingState.SYS_ERR)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_MAYBE_WANTS_TO_TALK_ABOUT_PARTICULAR_GAME,
    GamingState.USR_CHECK_WITH_USER_GAME_TITLE,
    gaming_nlg.check_game_name_with_user_response,
)
simplified_dialogflow.set_error_successor(
    GamingState.SYS_USER_MAYBE_WANTS_TO_TALK_ABOUT_PARTICULAR_GAME, GamingState.SYS_ERR
)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_BOT_NEVER_PLAYED,
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_USER_IF_HE_PLAYED,
    partial(
        gaming_nlg.confess_bot_never_played_game_and_ask_user_response,
        candidate_game_id_is_already_set=False,
        did_user_play=True,
    ),
)
simplified_dialogflow.set_error_successor(
    GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_BOT_NEVER_PLAYED, GamingState.SYS_ERR
)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_THAT_USER_PLAYED_AND_BOT_DIDNT_PLAY,
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_HOW_LONG_USER_PLAYED,
    partial(
        gaming_nlg.confess_bot_never_played_game_and_ask_user_response,
        candidate_game_id_is_already_set=False,
        how_long_user_played=True,
    ),
)
simplified_dialogflow.set_error_successor(
    GamingState.SYS_USER_DEFINITELY_WANTS_TO_TALK_ABOUT_GAME_THAT_USER_PLAYED_AND_BOT_DIDNT_PLAY, GamingState.SYS_ERR
)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DOESNT_LIKE_GAMING,
    GamingState.USR_ASK_IF_USER_THINKS_THAT_GAMING_IS_UNHEALTHY,
    gaming_nlg.ask_if_user_thinks_that_gaming_is_unhealthy_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DOESNT_LIKE_GAMING, GamingState.SYS_ERR)
##################################################################################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DIDNT_NAME_GAME,
    GamingState.USR_ASK_IF_USER_PLAYED_MINECRAFT,
    gaming_nlg.ask_if_user_played_minecraft_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DIDNT_NAME_GAME, GamingState.SYS_ERR)
##################################################################################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_ASK_IF_USER_THINKS_THAT_GAMING_IS_UNHEALTHY,
    {
        GamingState.SYS_USER_THINKS_GAMING_IS_UNHEALTHY: user_says_yes_request,
        GamingState.SYS_USER_THINKS_GAMING_IS_HEALTHY: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_ASK_IF_USER_THINKS_THAT_GAMING_IS_UNHEALTHY, GamingState.SYS_ERR
)
#########################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_THINKS_GAMING_IS_UNHEALTHY,
    (scopes.MAIN, scopes.State.USR_ROOT),
    gaming_nlg.tell_about_healthy_gaming_and_ask_what_sport_user_likes_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_THINKS_GAMING_IS_UNHEALTHY, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_THINKS_GAMING_IS_HEALTHY,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(
        gaming_nlg.tell_about_minecraft_animation_and_ask_what_animation_user_likes_response,
        prefix="Okay. I guess some people just don't think that playing video games is fun.",
    ),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_THINKS_GAMING_IS_HEALTHY, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_ASK_IF_USER_PLAYED_MINECRAFT,
    {
        (scopes.MINECRAFT, MinecraftState.USR_START): user_says_yes_request,
        GamingState.SYS_USER_DIDNT_PLAY_MINECRAFT: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(GamingState.USR_ASK_IF_USER_PLAYED_MINECRAFT, GamingState.SYS_ERR)
#########################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DIDNT_PLAY_MINECRAFT,
    (scopes.MAIN, scopes.State.USR_ROOT),
    gaming_nlg.tell_about_minecraft_animation_and_ask_what_animation_user_likes_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DIDNT_PLAY_MINECRAFT, GamingState.SYS_ERR)
##############################################################
# If the user confirms the candidate game and it is Minecraft, switch to the
# Minecraft flow; otherwise continue in this flow.
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_CHECK_WITH_USER_GAME_TITLE,
    {
        GamingState.SYS_USER_CONFIRMS_GAME_BOT_NEVER_PLAYED: partial(
            user_says_yes_request,
            additional_check=lambda n, v: not is_game_candidate_minecraft(n, v),
        ),
        (scopes.MINECRAFT, MinecraftState.USR_START): partial(
            user_says_yes_request, additional_check=is_game_candidate_minecraft
        ),
        GamingState.SYS_USER_DOESNT_CONFIRM_GAME: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(GamingState.USR_CHECK_WITH_USER_GAME_TITLE, GamingState.SYS_ERR)
#########################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DOESNT_CONFIRM_GAME,
    GamingState.USR_START,
    partial(
        link_to_other_skills_response,
        shared_memory_actions=[gaming_memory.clean_candidate_game_id],
        prefix="Sorry, never mind.",
    ),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DOESNT_CONFIRM_GAME, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_CONFIRMS_GAME_BOT_NEVER_PLAYED,
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_USER_IF_HE_PLAYED,
    partial(
        gaming_nlg.confess_bot_never_played_game_and_ask_user_response,
        candidate_game_id_is_already_set=True,
        did_user_play=True,
    ),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_CONFIRMS_GAME_BOT_NEVER_PLAYED, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_USER_IF_HE_PLAYED,
    {
        GamingState.SYS_USER_PLAYED_GAME: user_says_yes_request,
        GamingState.SYS_USER_DIDNT_PLAY_GAME: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_USER_IF_HE_PLAYED, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_HOW_LONG_USER_PLAYED,
    {GamingState.SYS_USER_TELLS_HOW_LONG_HE_PLAYED: user_says_anything_request},
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_CONFESS_BOT_NEVER_PLAYED_GAME_ASK_HOW_LONG_USER_PLAYED, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_TELLS_HOW_LONG_HE_PLAYED,
    GamingState.USR_COMMENT_ON_USER_EXPERIENCE_AND_ASK_IF_USER_RECOMMENDS_GAME,
    gaming_nlg.comment_on_user_experience_and_ask_if_user_recommends_game_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_TELLS_HOW_LONG_HE_PLAYED, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_PLAYED_GAME,
    GamingState.USR_TELL_ABOUT_WHAT_BOT_LIKES_AND_ASK_IF_USER_RECOMMENDS_GAME,
    gaming_nlg.tell_about_what_bot_likes_and_ask_if_user_recommends_game_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_PLAYED_GAME, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_TELL_ABOUT_WHAT_BOT_LIKES_AND_ASK_IF_USER_RECOMMENDS_GAME,
    {
        GamingState.SYS_USER_RECOMMENDS_GAME: user_says_yes_request,
        GamingState.SYS_USER_DOESNT_RECOMMEND_GAME: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_TELL_ABOUT_WHAT_BOT_LIKES_AND_ASK_IF_USER_RECOMMENDS_GAME, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_COMMENT_ON_USER_EXPERIENCE_AND_ASK_IF_USER_RECOMMENDS_GAME,
    {
        GamingState.SYS_USER_RECOMMENDS_GAME: user_says_yes_request,
        GamingState.SYS_USER_DOESNT_RECOMMEND_GAME: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_COMMENT_ON_USER_EXPERIENCE_AND_ASK_IF_USER_RECOMMENDS_GAME, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_RECOMMENDS_GAME,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(link_to_other_skills_response, prefix="Thank you, I will definitely check it up!"),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_RECOMMENDS_GAME, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DOESNT_RECOMMEND_GAME,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(link_to_other_skills_response, prefix="Thank you for saving my time!"),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DOESNT_RECOMMEND_GAME, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DIDNT_PLAY_GAME,
    GamingState.USR_SUGGEST_USER_GAME_DESCRIPTION,
    gaming_nlg.suggest_user_game_description_response,
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DIDNT_PLAY_GAME, GamingState.SYS_ERR)
##############################################################
# The game description is delivered over multiple turns; the branch taken
# depends on how many description turns remain in shared memory.
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_SUGGEST_USER_GAME_DESCRIPTION,
    {
        GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_AND_2_OR_MORE_TURNS_OF_DESCRIPTION_REMAIN: partial(
            user_says_yes_request,
            additional_check=gaming_memory.are_there_2_or_more_turns_left_in_game_description,
        ),
        GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_LAST_TURN_OF_DESCRIPTION: partial(
            user_says_yes_request,
            additional_check=lambda n, v: not gaming_memory.are_there_2_or_more_turns_left_in_game_description(n, v),
        ),
        GamingState.SYS_USER_DOESNT_WANT_GAME_DESCRIPTION: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(GamingState.USR_SUGGEST_USER_GAME_DESCRIPTION, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_AND_2_OR_MORE_TURNS_OF_DESCRIPTION_REMAIN,
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_IF_HE_WANTS_MORE,
    partial(gaming_nlg.describe_game_to_user_response, ask_if_user_wants_more=True),
)
simplified_dialogflow.set_error_successor(
    GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_AND_2_OR_MORE_TURNS_OF_DESCRIPTION_REMAIN, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_LAST_TURN_OF_DESCRIPTION,
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_HE_WANTS_TO_PLAY_GAME,
    partial(gaming_nlg.describe_game_to_user_response, ask_if_user_wants_more=False),
)
simplified_dialogflow.set_error_successor(
    GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_LAST_TURN_OF_DESCRIPTION, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_IF_HE_WANTS_MORE,
    {
        GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_AND_2_OR_MORE_TURNS_OF_DESCRIPTION_REMAIN: partial(
            user_says_yes_request, additional_check=gaming_memory.are_there_2_or_more_turns_left_in_game_description
        ),
        GamingState.SYS_USER_WANTS_GAME_DESCRIPTION_LAST_TURN_OF_DESCRIPTION: partial(
            user_says_yes_request,
            additional_check=lambda n, v: not gaming_memory.are_there_2_or_more_turns_left_in_game_description(n, v),
        ),
        GamingState.SYS_USER_DOESNT_WANT_GAME_DESCRIPTION: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_IF_HE_WANTS_MORE, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_user_serial_transitions(
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_HE_WANTS_TO_PLAY_GAME,
    {
        GamingState.SYS_USER_SAYS_HE_WANTS_TO_PLAY_GAME: user_says_yes_request,
        GamingState.SYS_USER_SAYS_HE_DOESNT_WANT_TO_PLAY_GAME: user_doesnt_say_yes_request,
    },
)
simplified_dialogflow.set_error_successor(
    GamingState.USR_DESCRIBE_GAME_TO_USER_AND_ASK_HE_WANTS_TO_PLAY_GAME, GamingState.SYS_ERR
)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_DOESNT_WANT_GAME_DESCRIPTION,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(
        link_to_other_skills_response,
        prefix="Okay.",
        shared_memory_actions=[lambda vars: state_utils.save_to_shared_memory(vars, curr_summary_sent_index=0)],
    ),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_DOESNT_WANT_GAME_DESCRIPTION, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_SAYS_HE_WANTS_TO_PLAY_GAME,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(link_to_other_skills_response, prefix="Cool! Hope you will have good time."),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_SAYS_HE_WANTS_TO_PLAY_GAME, GamingState.SYS_ERR)
##############################################################
simplified_dialogflow.add_system_transition(
    GamingState.SYS_USER_SAYS_HE_DOESNT_WANT_TO_PLAY_GAME,
    (scopes.MAIN, scopes.State.USR_ROOT),
    partial(link_to_other_skills_response, prefix="Cool! I am glad I could help."),
)
simplified_dialogflow.set_error_successor(GamingState.SYS_USER_SAYS_HE_DOESNT_WANT_TO_PLAY_GAME, GamingState.SYS_ERR)
##############################################################
# Catch-all: an always-true request with score -1.0 routes any utterance to
# SYS_ERR (presumably the lowest priority — verify against dff semantics).
simplified_dialogflow.add_global_user_serial_transitions(
    {
        GamingState.SYS_ERR: (lambda x, y: True, -1.0),
    },
)
simplified_dialogflow.add_system_transition(
    GamingState.SYS_ERR,
    (scopes.MAIN, scopes.State.USR_ROOT),
    error_response,
)
# Materialize the configured dialogflow object used by the skill runner.
dialogflow = simplified_dialogflow.get_dialogflow()
| 50.525773
| 142
| 0.702969
| 2,240
| 19,604
| 5.523661
| 0.0875
| 0.115413
| 0.098925
| 0.069345
| 0.866968
| 0.847733
| 0.825022
| 0.777742
| 0.738301
| 0.678089
| 0
| 0.000616
| 0.088757
| 19,604
| 387
| 143
| 50.656331
| 0.692006
| 0.010457
| 0
| 0.400612
| 0
| 0
| 0.01493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045872
| 0
| 0.045872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50f7dd32985dea14f025fd905bb3e7efe797b109
| 54
|
py
|
Python
|
tasks/data_loading/__init__.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 13
|
2020-12-11T12:10:20.000Z
|
2021-04-27T22:54:25.000Z
|
tasks/data_loading/__init__.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 40
|
2020-11-24T06:48:53.000Z
|
2021-04-28T05:20:37.000Z
|
tasks/data_loading/__init__.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 5
|
2020-11-26T08:23:05.000Z
|
2021-04-19T18:08:20.000Z
|
from .src.s3_client import *
from .src.utils import *
| 18
| 28
| 0.740741
| 9
| 54
| 4.333333
| 0.666667
| 0.358974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.148148
| 54
| 2
| 29
| 27
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fac305a5973da7dbd70463ee110428d37d8290a
| 35
|
py
|
Python
|
bikeshed/stringEnum/__init__.py
|
saschanaz/bikeshed
|
fb1e763a4f49852a7dabe8d783c6980416b238ef
|
[
"CC0-1.0"
] | 775
|
2015-01-06T16:58:59.000Z
|
2022-03-31T23:49:10.000Z
|
bikeshed/stringEnum/__init__.py
|
saschanaz/bikeshed
|
fb1e763a4f49852a7dabe8d783c6980416b238ef
|
[
"CC0-1.0"
] | 1,495
|
2015-01-06T01:06:00.000Z
|
2022-03-31T20:16:13.000Z
|
bikeshed/stringEnum/__init__.py
|
frivoal/bikeshed
|
132fff3bd80d0059b5a2ac0cd4e3317db34dec12
|
[
"CC0-1.0"
] | 196
|
2015-01-26T23:56:59.000Z
|
2022-03-23T20:35:59.000Z
|
from .StringEnum import StringEnum
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fb41aff5675088cfe6f89ad826a5a7e0c9819fd
| 1,187
|
py
|
Python
|
1-9/4. parse_ranges/parse_ranges.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 1
|
2021-11-30T05:03:24.000Z
|
2021-11-30T05:03:24.000Z
|
1-9/4. parse_ranges/parse_ranges.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | null | null | null |
1-9/4. parse_ranges/parse_ranges.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 2
|
2021-04-18T05:26:43.000Z
|
2021-11-28T18:46:43.000Z
|
# def parse_ranges(input_string):
#
# output = []
# ranges = [item.strip() for item in input_string.split(',')]
#
# for item in ranges:
# start, end = [int(i) for i in item.split('-')]
# output.extend(range(start, end + 1))
#
# return output
# def parse_ranges(input_string):
#
# ranges = [item.strip() for item in input_string.split(',')]
#
# for item in ranges:
# start, end = [int(i) for i in item.split('-')]
# yield from range(start, end + 1)
# def parse_ranges(input_string):
#
# ranges = [range_.strip() for range_ in input_string.split(',')]
#
# for item in ranges:
# if '-' in item:
# start, end = [int(i) for i in item.split('-')]
# yield from range(start, end + 1)
# else:
# yield int(item)
def parse_ranges(input_string):
ranges = [range_.strip() for range_ in input_string.split(',')]
for item in ranges:
if '->' in item:
yield int(item.split('-')[0])
elif '-' in item:
start, end = [int(i) for i in item.split('-')]
yield from range(start, end + 1)
else:
yield int(item)
| 26.977273
| 69
| 0.536647
| 157
| 1,187
| 3.955414
| 0.159236
| 0.141707
| 0.086957
| 0.122383
| 0.89372
| 0.853462
| 0.813205
| 0.813205
| 0.813205
| 0.813205
| 0
| 0.006046
| 0.303286
| 1,187
| 43
| 70
| 27.604651
| 0.744861
| 0.642797
| 0
| 0
| 0
| 0
| 0.015152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0fc0d0d499365d7ed67bf3c8d6f6eecde1a1514c
| 127
|
py
|
Python
|
simplelib.py
|
mlangc/demo-python-package
|
c649724e31f102042176bd50ed1bbef14b8dff74
|
[
"Apache-2.0"
] | null | null | null |
simplelib.py
|
mlangc/demo-python-package
|
c649724e31f102042176bd50ed1bbef14b8dff74
|
[
"Apache-2.0"
] | null | null | null |
simplelib.py
|
mlangc/demo-python-package
|
c649724e31f102042176bd50ed1bbef14b8dff74
|
[
"Apache-2.0"
] | null | null | null |
import version_query
def simple_fun(x):
return x * x
def get_version():
return version_query.predict_version_str()
| 12.7
| 46
| 0.732283
| 19
| 127
| 4.578947
| 0.578947
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188976
| 127
| 9
| 47
| 14.111111
| 0.84466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
0fe399cc34b67ecfbbf1bb8401af561776b4a914
| 4,655
|
py
|
Python
|
tests/rest/test_api_execute_logs.py
|
DmitryRibalka/monitorrent
|
f329d4bca151360d29e93d5369a1d21268d8998f
|
[
"WTFPL"
] | 465
|
2015-08-31T09:16:41.000Z
|
2022-03-12T10:33:04.000Z
|
tests/rest/test_api_execute_logs.py
|
DmitryRibalka/monitorrent
|
f329d4bca151360d29e93d5369a1d21268d8998f
|
[
"WTFPL"
] | 340
|
2015-07-18T17:31:54.000Z
|
2022-03-30T15:16:25.000Z
|
tests/rest/test_api_execute_logs.py
|
DmitryRibalka/monitorrent
|
f329d4bca151360d29e93d5369a1d21268d8998f
|
[
"WTFPL"
] | 87
|
2015-07-18T10:52:24.000Z
|
2022-03-27T09:52:35.000Z
|
from builtins import range
import json
import falcon
from mock import MagicMock
from ddt import ddt, data
from tests import RestTestBase
from monitorrent.rest.execute_logs import ExecuteLogs
class ExecuteLogsTest(RestTestBase):
def test_get_all(self):
entries = [{}, {}, {}]
count = 3
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(return_value=(entries, count))
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries, result['data'])
self.assertEqual(count, result['count'])
def test_get_paged(self):
# count should be less than 30
count = 23
entries = [{'i': i} for i in range(count)]
def get_log_entries(skip, take):
return entries[skip:skip + take], count
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(side_effect=get_log_entries)
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[0:10], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=0', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[0:10], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[10:20], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=20', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
# assume that count is less then 30
self.assertEqual(entries[20:count], result['data'])
self.assertEqual(count, result['count'])
def test_bad_requests(self):
entries = [{}, {}, {}]
count = 3
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(return_value=(entries, count))
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
self.simulate_request('/api/execute/logs')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take is required')
self.simulate_request('/api/execute/logs', query_string='take=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be int')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be int')
self.simulate_request('/api/execute/logs', query_string='take=101')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be less or equal to 100')
self.simulate_request('/api/execute/logs', query_string='take=-10')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=0')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=-1')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be greater or equal to 0')
| 38.155738
| 109
| 0.67884
| 596
| 4,655
| 5.159396
| 0.151007
| 0.078699
| 0.068293
| 0.085854
| 0.846179
| 0.846179
| 0.835447
| 0.835447
| 0.835447
| 0.772033
| 0
| 0.015127
| 0.190548
| 4,655
| 121
| 110
| 38.471074
| 0.800955
| 0.03072
| 0
| 0.546667
| 0
| 0
| 0.170439
| 0
| 0
| 0
| 0
| 0
| 0.36
| 1
| 0.053333
| false
| 0
| 0.093333
| 0.013333
| 0.173333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e83d4a4a930feec475942c2348a3afef2148f193
| 2,281
|
py
|
Python
|
tests/features/sort_test.py
|
YouTwitFace/babi
|
3697e931aefbe09178fc0441d403c5040ecfc4cd
|
[
"MIT"
] | null | null | null |
tests/features/sort_test.py
|
YouTwitFace/babi
|
3697e931aefbe09178fc0441d403c5040ecfc4cd
|
[
"MIT"
] | null | null | null |
tests/features/sort_test.py
|
YouTwitFace/babi
|
3697e931aefbe09178fc0441d403c5040ecfc4cd
|
[
"MIT"
] | null | null | null |
import pytest
from testing.runner import and_exit
from testing.runner import trigger_command_mode
@pytest.fixture
def unsorted(tmpdir):
f = tmpdir.join('f')
f.write('d\nb\nc\na\n')
return f
def test_sort_entire_file(run, unsorted):
with run(str(unsorted)) as h, and_exit(h):
trigger_command_mode(h)
h.press_and_enter(':sort')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=1)
h.press('^S')
assert unsorted.read() == 'a\nb\nc\nd\n'
def test_reverse_sort_entire_file(run, unsorted):
with run(str(unsorted)) as h, and_exit(h):
trigger_command_mode(h)
h.press_and_enter(':sort!')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=1)
h.press('^S')
assert unsorted.read() == 'd\nc\nb\na\n'
def test_sort_selection(run, unsorted):
with run(str(unsorted)) as h, and_exit(h):
h.press('S-Down')
trigger_command_mode(h)
h.press_and_enter(':sort')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=1)
h.press('^S')
assert unsorted.read() == 'b\nd\nc\na\n'
def test_reverse_sort_selection(run, unsorted):
with run(str(unsorted)) as h, and_exit(h):
h.press('Down')
h.press('S-Down')
trigger_command_mode(h)
h.press_and_enter(':sort!')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=2)
h.press('^S')
assert unsorted.read() == 'd\nc\nb\na\n'
def test_sort_selection_does_not_include_eof(run, unsorted):
with run(str(unsorted)) as h, and_exit(h):
for _ in range(5):
h.press('S-Down')
trigger_command_mode(h)
h.press_and_enter(':sort')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=1)
h.press('^S')
assert unsorted.read() == 'a\nb\nc\nd\n'
def test_sort_does_not_include_blank_line_after(run, tmpdir):
f = tmpdir.join('f')
f.write('b\na\n\nd\nc\n')
with run(str(f)) as h, and_exit(h):
h.press('S-Down')
h.press('S-Down')
trigger_command_mode(h)
h.press_and_enter(':sort')
h.await_text('sorted!')
h.await_cursor_position(x=0, y=1)
h.press('^S')
assert f.read() == 'a\nb\n\nd\nc\n'
| 27.817073
| 61
| 0.606313
| 368
| 2,281
| 3.548913
| 0.173913
| 0.082695
| 0.058959
| 0.045942
| 0.820827
| 0.803982
| 0.803982
| 0.767228
| 0.767228
| 0.753446
| 0
| 0.007403
| 0.230162
| 2,281
| 81
| 62
| 28.160494
| 0.736333
| 0
| 0
| 0.692308
| 0
| 0
| 0.097326
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 1
| 0.107692
| false
| 0
| 0.046154
| 0
| 0.169231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e89e6ea0d75b7f30db55d51788f82a3762301c81
| 81
|
py
|
Python
|
beluga/optimlib/__init__.py
|
dHannasch/beluga
|
519e1ca2a43a86bc47737c45484288b2bacc1338
|
[
"MIT"
] | null | null | null |
beluga/optimlib/__init__.py
|
dHannasch/beluga
|
519e1ca2a43a86bc47737c45484288b2bacc1338
|
[
"MIT"
] | null | null | null |
beluga/optimlib/__init__.py
|
dHannasch/beluga
|
519e1ca2a43a86bc47737c45484288b2bacc1338
|
[
"MIT"
] | null | null | null |
from .optimlib import *
from .indirect import *
from .diffyg_deprecated import *
| 20.25
| 32
| 0.777778
| 10
| 81
| 6.2
| 0.6
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 3
| 33
| 27
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3afbf0090bfd132657b99030b72abdd5eb7bc5b6
| 38
|
py
|
Python
|
tests/__init__.py
|
ShadowStalker13/TextStatistics
|
5535ffa8319c324af1c3444514b19c17dd088cb7
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
ShadowStalker13/TextStatistics
|
5535ffa8319c324af1c3444514b19c17dd088cb7
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
ShadowStalker13/TextStatistics
|
5535ffa8319c324af1c3444514b19c17dd088cb7
|
[
"MIT"
] | null | null | null |
from .Test import TextStatisticsTests
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d724fbe81a1e80c8ceaf57bd96c710e78ca39f1f
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/scalars.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/scalars.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/scalars.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/3e/db/5b/c251d36230455d3360c1ee199bd7490cb4a38c419b0ccc2f47d0725d23
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d73fb1732bf41af1581997a28a5e107c2880ef99
| 17,839
|
py
|
Python
|
reservations/tests/test_allocation_solver.py
|
Sukriva/tilavarauspalvelu-core
|
42443082f61a1f92fc8a9315806fafabf7f64513
|
[
"MIT"
] | null | null | null |
reservations/tests/test_allocation_solver.py
|
Sukriva/tilavarauspalvelu-core
|
42443082f61a1f92fc8a9315806fafabf7f64513
|
[
"MIT"
] | null | null | null |
reservations/tests/test_allocation_solver.py
|
Sukriva/tilavarauspalvelu-core
|
42443082f61a1f92fc8a9315806fafabf7f64513
|
[
"MIT"
] | null | null | null |
import datetime
import pytest
from applications.models import EventReservationUnit
from reservations.allocation_models import AllocationData
from reservations.allocation_solver import AllocationSolver
@pytest.mark.django_db
def test_when_matching_unit_in_application_and_application_round_can_be_allocated(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
matching_event_reservation_unit,
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
assert (
solution[0].space_id
== application_with_reservation_units.application_round.reservation_units.all()[
0
].id
)
assert solution[0].event_id == recurring_application_event.id
assert solution[0].occurrence_id == scheduled_for_monday.id
assert solution[0].duration == datetime.timedelta(hours=1)
@pytest.mark.django_db
def test_non_matching_unit_in_application_and_application_round_can_not_be_allocated(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
not_matching_event_reservation_unit,
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 0
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"events_per_week": 1,
"duration": 300,
"schedules": [{"day": 0}],
}
]
},
{
"events": [
{
"events_per_week": 1,
"duration": 300,
"schedules": [{"day": 0}],
}
]
},
{
"events": [
{
"events_per_week": 1,
"duration": 300,
"schedules": [{"day": 0}],
}
]
},
]
}
]
),
indirect=True,
)
def test_should_only_allocate_events_which_fit_within_capacity(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
# Open 10 hours each day, we have three events to allocate with 300 minutes= 5 hours duration each
assert len(solution) == 2
assert solution[0].duration == datetime.timedelta(hours=5)
assert solution[1].duration == datetime.timedelta(hours=5)
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 15,
"events_per_week": 1,
"schedules": [{"day": 0}, {"day": 1}, {"day": 2}],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_only_give_requested_number_of_events(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
# Requested 1 event per week with 3 possible times
assert len(solution) == 1
assert solution[0].duration == datetime.timedelta(minutes=15)
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "10:30"}
],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_not_allocate_if_given_timeframe_cant_contain_duration(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 0
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "10:30"},
{"day": 0, "start": "18:00", "end": "20:00"},
],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_be_able_to_allocate_if_long_enough_slot_with_too_small_slot(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
assert len(solution) == 1
start_times = []
for sol in solution:
start_times.append(sol.begin)
assert start_times == [datetime.time(hour=18, minute=0)]
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "12:00"}
],
},
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "12:00"}
],
},
]
}
]
}
]
),
indirect=True,
)
def test_should_start_and_end_between_requested_times_and_not_overlap_in_space(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 2
start_times = []
end_times = []
for sol in solution:
start_times.append(sol.begin)
end_times.append(sol.end)
assert start_times == [
datetime.time(hour=10, minute=0),
datetime.time(hour=11, minute=0),
]
assert end_times == [
datetime.time(hour=11, minute=0),
datetime.time(hour=12, minute=0),
]
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
]
}
]
}
]
),
indirect=True,
)
def test_should_not_allocate_if_events_need_to_overlap(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
start_times = []
for sol in solution:
start_times.append(sol.begin)
assert start_times == [datetime.time(hour=10, minute=0)]
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
]
}
]
}
]
),
indirect=True,
)
def test_events_can_overlap_in_different_units(
application_round_with_reservation_units,
multiple_applications,
second_reservation_unit,
reservation_unit,
):
application_round_with_reservation_units.reservation_units.set(
[reservation_unit, second_reservation_unit]
)
for application in application_round_with_reservation_units.applications.all():
for event in application.application_events.all():
unit_one = event.event_reservation_units.all()[0]
unit_two = EventReservationUnit.objects.create(
priority=100,
application_event=event,
reservation_unit=second_reservation_unit,
)
event.event_reservation_units.set([unit_one, unit_two])
event.num_persons = 5
event.save()
application_round_with_reservation_units.save()
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 2
start_times = []
end_times = []
for sol in solution:
start_times.append(sol.begin)
end_times.append(sol.end)
assert start_times == [
datetime.time(hour=10, minute=0),
datetime.time(hour=10, minute=0),
]
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:18", "end": "12:00"}
],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_allocate_with_15_minutes_precision_rounded_up(
application_round_with_reservation_units, multiple_applications
):
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
assert solution[0].begin == datetime.time(hour=10, minute=30)
assert solution[0].end == datetime.time(hour=11, minute=30)
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
},
]
}
]
}
]
),
indirect=True,
)
def test_should_restrict_allocation_by_unit_max_persons(
application_round_with_reservation_units,
multiple_applications,
second_reservation_unit,
reservation_unit,
):
application_round_with_reservation_units.reservation_units.set(
[reservation_unit, second_reservation_unit]
)
for application in application_round_with_reservation_units.applications.all():
for event in application.application_events.all():
unit_one = event.event_reservation_units.all()[0]
unit_two = EventReservationUnit.objects.create(
priority=100,
application_event=event,
reservation_unit=second_reservation_unit,
)
event.event_reservation_units.set([unit_one, unit_two])
event.save()
application_round_with_reservation_units.save()
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
start_times = []
end_times = []
for sol in solution:
start_times.append(sol.begin)
end_times.append(sol.end)
assert start_times == [datetime.time(hour=10, minute=0)]
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_allocate_when_unit_max_persons_is_none(
application_round_with_reservation_units,
multiple_applications,
reservation_unit,
):
for space in reservation_unit.spaces.all():
space.max_persons = None
space.save()
data = AllocationData(application_round=application_round_with_reservation_units)
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
assert solution[0].begin == datetime.time(hour=10, minute=0)
@pytest.mark.django_db
@pytest.mark.parametrize(
"multiple_applications",
(
[
{
"applications": [
{
"events": [
{
"duration": 60,
"events_per_week": 1,
"schedules": [
{"day": 0, "start": "10:00", "end": "11:00"}
],
}
]
}
]
}
]
),
indirect=True,
)
def test_should_allocate_when_event_num_persons_is_none(
application_round_with_reservation_units,
multiple_applications,
reservation_unit,
):
data = AllocationData(application_round=application_round_with_reservation_units)
for application in application_round_with_reservation_units.applications.all():
for event in application.application_events.all():
event.num_persons = None
event.save()
solver = AllocationSolver(allocation_data=data)
solution = solver.solve()
assert len(solution) == 1
assert solution[0].begin == datetime.time(hour=10, minute=0)
| 29.63289
| 102
| 0.479567
| 1,428
| 17,839
| 5.677171
| 0.095238
| 0.096707
| 0.088812
| 0.126187
| 0.874306
| 0.865425
| 0.841495
| 0.822006
| 0.797089
| 0.797089
| 0
| 0.027226
| 0.429677
| 17,839
| 601
| 103
| 29.682196
| 0.769609
| 0.008128
| 0
| 0.599609
| 0
| 0
| 0.073314
| 0.013057
| 0
| 0
| 0
| 0
| 0.060547
| 1
| 0.025391
| false
| 0
| 0.009766
| 0
| 0.035156
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d74b82548c593703cafa748dfd48f2a89970db9a
| 4,751
|
py
|
Python
|
bridges/tests/api/surveys_url_questions/test_surveys_url_questions_question_id_delete.py
|
pegasystems/building-bridges
|
1a278df62c56421ab08b9ad14395fe9bf57cd32f
|
[
"MIT"
] | 20
|
2021-04-14T13:03:49.000Z
|
2022-03-29T17:56:26.000Z
|
bridges/tests/api/surveys_url_questions/test_surveys_url_questions_question_id_delete.py
|
pegasystems/building-bridges
|
1a278df62c56421ab08b9ad14395fe9bf57cd32f
|
[
"MIT"
] | 50
|
2021-04-16T17:32:14.000Z
|
2022-03-04T12:27:37.000Z
|
bridges/tests/api/surveys_url_questions/test_surveys_url_questions_question_id_delete.py
|
pegasystems/building-bridges
|
1a278df62c56421ab08b9ad14395fe9bf57cd32f
|
[
"MIT"
] | 2
|
2021-07-23T01:52:38.000Z
|
2022-03-30T15:42:32.000Z
|
import json
from http import HTTPStatus
from mockupdb import MockupDB, go, Command, OpReply
from json import dumps
import datetime
from bridges.tests.api.basic_test import BasicTest
import bridges.api.logic
QUESTION_ENDPOINT = 'surveys/test-1/questions/'
class DeleteQuestionTest(BasicTest):
def test_normal(self):
future = self.make_future_delete_request(f'{QUESTION_ENDPOINT}{str(self.example_ids[1])}')
# get data about survey
self.mock_get_info_about_survey()
# find question
request = self.server.receives()
timestamp = datetime.datetime.now()
request.ok(cursor={'id': 0, 'firstBatch': [{
"_id": self.example_ids[0],
"title": "exampleTitle",
"description": "example_description",
"number": 1,
'author': {
"host": "localhost",
"cookie": "cookie"
},
"url": "example-url",
"date": timestamp,
"questions": [
{
"content": "example-content",
'author': {
"host": "localhost",
"cookie": "cookie"
},
"date": timestamp,
"votes": [],
"_id": self.example_ids[1]
}
]}]})
request = self.server.receives()
request.ok({'nModified': 1})
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.NO_CONTENT)
def test_not_delete_comment_in_disabled_survey(self):
future = self.make_future_delete_request(f'{QUESTION_ENDPOINT}{str(self.example_ids[1])}')
# get data about survey
self.mock_get_info_about_survey(asking_questions_enabled=False)
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.METHOD_NOT_ALLOWED)
def test_notFound(self):
future = self.make_future_delete_request(f'{QUESTION_ENDPOINT}{str(self.example_ids[0])}')
# get data about survey
self.mock_get_info_about_survey()
# find question
request = self.server.receives()
request.ok(cursor={'id': 0, 'firstBatch': []})
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.NOT_FOUND)
def test_notAuthorized(self):
future = self.make_future_delete_request(f'{QUESTION_ENDPOINT}{str(self.example_ids[1])}')
# get data about survey
self.mock_get_info_about_survey()
# find question
request = self.server.receives()
timestamp = datetime.datetime.now()
request.ok(cursor={'id': 0, 'firstBatch': [{
"_id": self.example_ids[0],
"title": "exampleTitle",
"description": "example_description",
"number": 1,
"author": "localhost",
"url": "example-url",
"date": timestamp,
"questions": [
{
"content": "example-content",
'author': {"host": "NOT-MY-IP", "cookie": "cookie"},
"date": timestamp,
"votes": [],
"_id": self.example_ids[1]
}
]}]})
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.UNAUTHORIZED)
def test_Forbidden(self):
future = self.make_future_delete_request(f'{QUESTION_ENDPOINT}{str(self.example_ids[1])}')
# get data about survey
self.mock_get_info_about_survey()
# find question
request = self.server.receives()
timestamp = datetime.datetime.now()
request.ok(cursor={'id': 0, 'firstBatch': [{
"_id": self.example_ids[0],
"title": "exampleTitle",
"description": "example_description",
"number": 1,
'author': {"host": "localhost", "cookie": "cookie"},
"url": "example-url",
"date": timestamp,
"questions": [
{
"content": "example-content",
'author': {"host": "localhost", "cookie": "cookie"},
"date": timestamp,
"votes": [
{
'author': {"host": "localhost", "cookie": "cookie"},
"upvote": True,
"date": timestamp
}
],
"_id": self.example_ids[1],
}
]}]})
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.FORBIDDEN)
| 38.008
| 98
| 0.529783
| 441
| 4,751
| 5.496599
| 0.199546
| 0.049917
| 0.063531
| 0.043317
| 0.804043
| 0.791254
| 0.765677
| 0.765677
| 0.765677
| 0.765677
| 0
| 0.006408
| 0.343086
| 4,751
| 124
| 99
| 38.314516
| 0.770266
| 0.03473
| 0
| 0.626168
| 0
| 0
| 0.189904
| 0.054633
| 0
| 0
| 0
| 0
| 0.046729
| 1
| 0.046729
| false
| 0
| 0.065421
| 0
| 0.121495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d76e2ff6886ecff60da674a05e7693fe4aecf301
| 165
|
py
|
Python
|
mdgenerate/__init__.py
|
nielsmde/mdgenerate
|
6633f6d8f5255620ba8ec5169509b99a31b03ae5
|
[
"MIT"
] | 1
|
2020-11-14T18:54:43.000Z
|
2020-11-14T18:54:43.000Z
|
mdgenerate/__init__.py
|
nielsmde/mdgenerate
|
6633f6d8f5255620ba8ec5169509b99a31b03ae5
|
[
"MIT"
] | null | null | null |
mdgenerate/__init__.py
|
nielsmde/mdgenerate
|
6633f6d8f5255620ba8ec5169509b99a31b03ae5
|
[
"MIT"
] | null | null | null |
__version__ = '1.0'
from .mdgenerate import grompp, submit, process
from .confine import generate_spherical_water, generate_slit_water, generate_cylindrical_water
| 27.5
| 94
| 0.836364
| 21
| 165
| 6.095238
| 0.714286
| 0.203125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.10303
| 165
| 5
| 95
| 33
| 0.851351
| 0
| 0
| 0
| 1
| 0
| 0.018293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d78d82dccc7dd497150cc32018e4464d39163867
| 81
|
py
|
Python
|
gui/screens/paymentpage.py
|
tonymorony/ChannelsCC-GUI
|
07df3706f8a250738311773eaf130fd8ebced64a
|
[
"MIT"
] | 1
|
2018-12-12T12:18:57.000Z
|
2018-12-12T12:18:57.000Z
|
gui/screens/paymentpage.py
|
tonymorony/ChannelsCC-GUI
|
07df3706f8a250738311773eaf130fd8ebced64a
|
[
"MIT"
] | null | null | null |
gui/screens/paymentpage.py
|
tonymorony/ChannelsCC-GUI
|
07df3706f8a250738311773eaf130fd8ebced64a
|
[
"MIT"
] | null | null | null |
from kivy.uix.screenmanager import Screen
class PaymentPage(Screen):
pass
| 11.571429
| 41
| 0.765432
| 10
| 81
| 6.2
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 81
| 6
| 42
| 13.5
| 0.925373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d790c36d08ce6cd37655f9644725b32f183e0b56
| 222
|
py
|
Python
|
src/baboon_tracking/mixins/intersected_frames_mixin.py
|
radioactivebean0/baboon-tracking
|
062351c514073aac8e1207b8b46ca89ece987928
|
[
"MIT"
] | 6
|
2019-07-15T19:10:59.000Z
|
2022-02-01T04:25:26.000Z
|
src/baboon_tracking/mixins/intersected_frames_mixin.py
|
radioactivebean0/baboon-tracking
|
062351c514073aac8e1207b8b46ca89ece987928
|
[
"MIT"
] | 86
|
2019-07-02T17:59:46.000Z
|
2022-02-01T23:23:08.000Z
|
src/baboon_tracking/mixins/intersected_frames_mixin.py
|
radioactivebean0/baboon-tracking
|
062351c514073aac8e1207b8b46ca89ece987928
|
[
"MIT"
] | 7
|
2019-10-16T12:58:21.000Z
|
2022-03-08T00:31:32.000Z
|
"""
Mixin for returning the intersected frames.
"""
class IntersectedFramesMixin:
"""
Mixin for returning the intersected frames.
"""
def __init__(self):
self.intersected_frames = []
| 17.076923
| 48
| 0.621622
| 20
| 222
| 6.65
| 0.55
| 0.383459
| 0.255639
| 0.300752
| 0.556391
| 0.556391
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279279
| 222
| 12
| 49
| 18.5
| 0.83125
| 0.391892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ad9280cc418fc6bc4397b0f0c9410e5880823a62
| 307
|
py
|
Python
|
fairing/backend/kubernetes/__init__.py
|
cheyang/fairing
|
51083730090874b188c001d06b47e3fa817a321a
|
[
"Apache-2.0"
] | null | null | null |
fairing/backend/kubernetes/__init__.py
|
cheyang/fairing
|
51083730090874b188c001d06b47e3fa817a321a
|
[
"Apache-2.0"
] | null | null | null |
fairing/backend/kubernetes/__init__.py
|
cheyang/fairing
|
51083730090874b188c001d06b47e3fa817a321a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from kubernetes import client, config
from .manager import KubeManager, TF_JOB_VERSION
| 34.111111
| 48
| 0.879479
| 40
| 307
| 6.15
| 0.525
| 0.203252
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100977
| 307
| 8
| 49
| 38.375
| 0.891304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.875
| 0
| 0.875
| 0.125
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d10264878891853fa9eeaa591080c7292400c4ed
| 4,057
|
py
|
Python
|
test/unit/test_ensureloggroup.py
|
jlhood/json-lambda-logs-to-kinesis-firehose
|
15cd6120c578fc751f97d4209b66bd06713b1682
|
[
"MIT"
] | 2
|
2019-10-25T18:21:24.000Z
|
2021-03-17T13:51:07.000Z
|
test/unit/test_ensureloggroup.py
|
jlhood/json-lambda-logs-to-kinesis-firehose
|
15cd6120c578fc751f97d4209b66bd06713b1682
|
[
"MIT"
] | null | null | null |
test/unit/test_ensureloggroup.py
|
jlhood/json-lambda-logs-to-kinesis-firehose
|
15cd6120c578fc751f97d4209b66bd06713b1682
|
[
"MIT"
] | 1
|
2020-01-21T22:29:00.000Z
|
2020-01-21T22:29:00.000Z
|
import pytest
import botocore
import ensureloggroup
LOG_GROUP_NAME = 'myLogGroup'
PHYSICAL_RESOURCE_ID = 'someUUID'
@pytest.fixture
def mock_cw_logs(mocker):
mocker.patch.object(ensureloggroup, 'CW_LOGS')
return ensureloggroup.CW_LOGS
def test_create_log_group_no_exist(mock_cw_logs, mocker):
mocker.patch.object(ensureloggroup, 'uuid')
ensureloggroup.uuid.uuid4.return_value = PHYSICAL_RESOURCE_ID
response = ensureloggroup.create(_mock_event(), None)
assert response == {
'Status': 'SUCCESS',
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'Data': {
'LogGroupName': LOG_GROUP_NAME
}
}
ensureloggroup.uuid.uuid4.assert_called()
ensureloggroup.CW_LOGS.create_log_group.assert_called_with(
logGroupName=LOG_GROUP_NAME
)
def test_create_log_group_already_exists(mock_cw_logs, mocker):
mocker.patch.object(ensureloggroup, 'uuid')
ensureloggroup.uuid.uuid4.return_value = PHYSICAL_RESOURCE_ID
ensureloggroup.CW_LOGS.create_log_group.side_effect = botocore.exceptions.ClientError(
{
'Error': {
'Code': 'ResourceAlreadyExistsException'
}
},
None
)
response = ensureloggroup.create(_mock_event(), None)
assert response == {
'Status': 'SUCCESS',
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'Data': {
'LogGroupName': LOG_GROUP_NAME
}
}
ensureloggroup.uuid.uuid4.assert_called()
ensureloggroup.CW_LOGS.create_log_group.assert_called_with(
logGroupName=LOG_GROUP_NAME
)
def test_create_log_group_other_error(mock_cw_logs, mocker):
mocker.patch.object(ensureloggroup, 'uuid')
ensureloggroup.uuid.uuid4.return_value = PHYSICAL_RESOURCE_ID
ensureloggroup.CW_LOGS.create_log_group.side_effect = botocore.exceptions.ClientError(
{
'Error': {
'Code': 'SomethingElse'
}
},
None
)
with pytest.raises(botocore.exceptions.ClientError):
ensureloggroup.create(_mock_event(), None)
def test_update_log_group_no_exist(mock_cw_logs, mocker):
response = ensureloggroup.update(_mock_event(), None)
assert response == {
'Status': 'SUCCESS',
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'Data': {
'LogGroupName': LOG_GROUP_NAME
}
}
ensureloggroup.CW_LOGS.create_log_group.assert_called_with(
logGroupName=LOG_GROUP_NAME
)
def test_update_log_group_already_exists(mock_cw_logs, mocker):
ensureloggroup.CW_LOGS.create_log_group.side_effect = botocore.exceptions.ClientError(
{
'Error': {
'Code': 'ResourceAlreadyExistsException'
}
},
None
)
response = ensureloggroup.update(_mock_event(), None)
assert response == {
'Status': 'SUCCESS',
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'Data': {
'LogGroupName': LOG_GROUP_NAME
}
}
ensureloggroup.CW_LOGS.create_log_group.assert_called_with(
logGroupName=LOG_GROUP_NAME
)
def test_update_log_group_other_error(mock_cw_logs, mocker):
ensureloggroup.CW_LOGS.create_log_group.side_effect = botocore.exceptions.ClientError(
{
'Error': {
'Code': 'SomethingElse'
}
},
None
)
with pytest.raises(botocore.exceptions.ClientError):
ensureloggroup.update(_mock_event(), None)
def test_delete(mock_cw_logs, mocker):
response = ensureloggroup.delete(_mock_event(), None)
assert response == {
'Status': 'SUCCESS',
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'Data': {
'LogGroupName': None
}
}
ensureloggroup.CW_LOGS.create_log_group.assert_not_called()
def _mock_event():
return {
'PhysicalResourceId': PHYSICAL_RESOURCE_ID,
'ResourceProperties': {
'LogGroupName': LOG_GROUP_NAME
}
}
| 26.00641
| 90
| 0.655903
| 402
| 4,057
| 6.243781
| 0.136816
| 0.079681
| 0.066932
| 0.086056
| 0.895219
| 0.866932
| 0.851793
| 0.835857
| 0.780478
| 0.780478
| 0
| 0.001639
| 0.248213
| 4,057
| 155
| 91
| 26.174194
| 0.821311
| 0
| 0
| 0.533333
| 0
| 0
| 0.108948
| 0.014789
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.075
| false
| 0
| 0.025
| 0.008333
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7eae7709ac93a4f431dc888f7a47b2f44f85601c
| 2,775
|
py
|
Python
|
entente/landmarks/test_symmetrize_landmarks.py
|
metabolize/entente
|
c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a
|
[
"MIT"
] | 4
|
2019-05-09T17:11:58.000Z
|
2022-01-28T20:27:39.000Z
|
entente/landmarks/test_symmetrize_landmarks.py
|
metabolize/entente
|
c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a
|
[
"MIT"
] | 94
|
2018-10-02T15:45:55.000Z
|
2021-07-14T14:20:38.000Z
|
entente/landmarks/test_symmetrize_landmarks.py
|
metabolize/entente
|
c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a
|
[
"MIT"
] | 3
|
2019-01-21T00:59:24.000Z
|
2022-01-28T20:26:28.000Z
|
from entente.landmarks.symmetrize_landmarks import (
symmetrize_landmarks_using_plane,
symmetrize_landmarks_using_topology,
)
import numpy as np
from polliwog import Plane
import pytest
from vg.compat import v1 as vg
from ..test_symmetry import create_seat_and_arm_mesh
def test_symmetrize_landmarks_using_plane():
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
symmetrized = symmetrize_landmarks_using_plane(Plane.yz, original)
np.testing.assert_allclose(symmetrized, original, atol=1)
mirrored = np.copy(original)
mirrored[:, 0] = -mirrored[:, 0]
np.testing.assert_allclose(np.flipud(symmetrized), mirrored, atol=1)
distances_to_original = vg.euclidean_distance(symmetrized, original)
distances_to_mirrored = vg.euclidean_distance(np.flipud(symmetrized), mirrored)
np.testing.assert_allclose(distances_to_original, distances_to_mirrored, atol=1e-1)
def test_symmetrize_landmarks_using_plane_non_plane():
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(ValueError, match=r"plane_of_symmetry should be a Plane"):
symmetrize_landmarks_using_plane("not_a_plane", original)
def test_symmetrize_landmarks_using_topology():
mesh = create_seat_and_arm_mesh()
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
symmetrized = symmetrize_landmarks_using_topology(
mesh, Plane.yz, original, atol=1e-1
)
np.testing.assert_allclose(symmetrized, original, atol=1)
mirrored = np.copy(original)
mirrored[:, 0] = -mirrored[:, 0]
np.testing.assert_allclose(np.flipud(symmetrized), mirrored, atol=1)
distances_to_original = vg.euclidean_distance(symmetrized, original)
distances_to_mirrored = vg.euclidean_distance(np.flipud(symmetrized), mirrored)
np.testing.assert_allclose(distances_to_original, distances_to_mirrored, atol=1e-1)
def test_symmetrize_landmarks_using_topology_asymmetrical():
mesh = create_seat_and_arm_mesh().translated(np.array([50.0, 0.0, 0.0]))
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(
ValueError, match=r"Some landmarks are near triangles which are not mirrored"
):
symmetrize_landmarks_using_topology(mesh, Plane.yz, original, atol=1e-1)
def test_symmetrize_landmarks_using_topology_non_plane():
mesh = create_seat_and_arm_mesh().translated(np.array([50.0, 0.0, 0.0]))
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(ValueError, match=r"plane_of_symmetry should be a Plane"):
symmetrize_landmarks_using_topology(mesh, "not_a_plane", original, atol=1e-1)
| 41.41791
| 87
| 0.742703
| 392
| 2,775
| 5.007653
| 0.181122
| 0.125828
| 0.146714
| 0.114111
| 0.846154
| 0.811004
| 0.760571
| 0.760571
| 0.760571
| 0.736628
| 0
| 0.086522
| 0.133694
| 2,775
| 66
| 88
| 42.045455
| 0.730033
| 0
| 0
| 0.479167
| 0
| 0
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.104167
| false
| 0
| 0.125
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7eb0a2c107a5cbc760a30b88c516867ccf8a958e
| 168
|
py
|
Python
|
molsysmt/tools/molsysmt_TrajectoryDict/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/molsysmt_TrajectoryDict/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/molsysmt_TrajectoryDict/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
from .is_molsysmt_TrajectoryDict import is_molsysmt_TrajectoryDict
from .to_molsysmt_Trajectory import to_molsysmt_Trajectory
from .to_file_trjpk import to_file_trjpk
| 33.6
| 66
| 0.904762
| 24
| 168
| 5.833333
| 0.375
| 0.142857
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077381
| 168
| 4
| 67
| 42
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7eb149f43a85f6811374fedaefdf80f0b23944ec
| 34
|
py
|
Python
|
python/eda/eda/components/_3M/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T20:01:19.000Z
|
2019-06-05T20:01:19.000Z
|
python/eda/eda/components/_3M/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | null | null | null |
python/eda/eda/components/_3M/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | null | null | null |
from eda.components._3M import *
| 11.333333
| 32
| 0.764706
| 5
| 34
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.147059
| 34
| 2
| 33
| 17
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e29a0d748034b8487d7829d44ae35c8834a6572
| 481
|
py
|
Python
|
demos/workshop-demo/helpers.py
|
v-jaswel/cognitive-services-personalizer-samples
|
3c476ed5345ccb33a25321358213e30ccd83269b
|
[
"MIT"
] | 44
|
2019-05-07T03:12:53.000Z
|
2022-03-22T19:30:35.000Z
|
demos/workshop-demo/helpers.py
|
v-jaswel/cognitive-services-personalizer-samples
|
3c476ed5345ccb33a25321358213e30ccd83269b
|
[
"MIT"
] | 27
|
2019-07-05T20:04:25.000Z
|
2019-08-05T18:21:58.000Z
|
demos/workshop-demo/helpers.py
|
v-jaswel/cognitive-services-personalizer-samples
|
3c476ed5345ccb33a25321358213e30ccd83269b
|
[
"MIT"
] | 65
|
2019-05-03T18:20:18.000Z
|
2022-03-16T10:48:18.000Z
|
class SlidingAverage:
def __init__(self, window_size):
self.index = 0
self.values = [0] * window_size
def _previous(self):
return self.values[(self.index + len(self.values) - 1) % len(self.values)]
def update(self, value):
self.values[self.index] = self._previous() + value
self.index = (self.index + 1) % len(self.values)
def get(self):
return (self._previous() - self.values[self.index]) / (len(self.values) - 1)
| 34.357143
| 84
| 0.611227
| 63
| 481
| 4.52381
| 0.269841
| 0.280702
| 0.182456
| 0.2
| 0.347368
| 0.231579
| 0.231579
| 0.231579
| 0
| 0
| 0
| 0.013624
| 0.237006
| 481
| 14
| 84
| 34.357143
| 0.762943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0
| 0.181818
| 0.636364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7ec052a15a717ccc19f3da41a7733eddf759704d
| 4,588
|
py
|
Python
|
tests/autoscaling/test_pause_service_autoscaler.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 1,711
|
2015-11-10T18:04:56.000Z
|
2022-03-23T08:53:16.000Z
|
tests/autoscaling/test_pause_service_autoscaler.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 1,689
|
2015-11-10T17:59:04.000Z
|
2022-03-31T20:46:46.000Z
|
tests/autoscaling/test_pause_service_autoscaler.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 267
|
2015-11-10T19:17:16.000Z
|
2022-02-08T20:59:52.000Z
|
import mock
import paasta_tools.paastaapi.models as paastamodels
from paasta_tools.autoscaling.pause_service_autoscaler import (
delete_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
get_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
update_service_autoscale_pause_time,
)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_error(mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = get_service_autoscale_pause_time("cluster1")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = (
None,
500,
None,
)
return_code = get_service_autoscale_pause_time("cluster1")
assert return_code == 2
@mock.patch("builtins.print", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_not(mock_client, mock_time, mock_print):
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = ("3", 200, None)
mock_time.time.return_value = 4
return_code = get_service_autoscale_pause_time("cluster1")
mock_print.assert_called_with("Service autoscaler is not paused")
assert return_code == 0
@mock.patch(
"paasta_tools.autoscaling.pause_service_autoscaler.print_paused_message",
autospec=True,
)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_paused(
mock_client, mock_time, mock_print_paused_message
):
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = ("3", 200, None)
mock_time.time.return_value = 2
return_code = get_service_autoscale_pause_time("cluster1")
mock_print_paused_message.assert_called_with(3.0)
assert return_code == 0
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_update_service_autoscale_pause_time(mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = update_service_autoscale_pause_time("cluster1", "2")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.update_service_autoscaler_pause = mock_update = mock.Mock()
mock_update.return_value = (None, 500, None)
return_code = update_service_autoscale_pause_time("cluster1", "3")
mock_update.assert_called_once_with(
paastamodels.InlineObject(minutes=3), _return_http_data_only=False
)
assert return_code == 2
mock_update.return_value = (None, 200, None)
return_code = update_service_autoscale_pause_time("cluster1", "2")
assert return_code == 0
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
@mock.patch("paasta_tools.paastaapi.apis.DefaultApi", autospec=True)
def test_delete_service_autoscale_pause_time(mock_default_api, mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = delete_service_autoscale_pause_time("cluster1")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.delete_service_autoscaler_pause = mock_delete = mock.Mock()
mock_delete.return_value = (None, 500, None)
return_code = delete_service_autoscale_pause_time("cluster1")
mock_delete.assert_called_once_with(_return_http_data_only=False)
assert return_code == 2
mock_delete.return_value = (None, 200, None)
return_code = delete_service_autoscale_pause_time("cluster1")
assert return_code == 0
| 41.333333
| 86
| 0.787053
| 629
| 4,588
| 5.292528
| 0.09539
| 0.060078
| 0.113548
| 0.135176
| 0.870832
| 0.8465
| 0.804446
| 0.780715
| 0.764494
| 0.732652
| 0
| 0.013443
| 0.124455
| 4,588
| 110
| 87
| 41.709091
| 0.815285
| 0
| 0
| 0.553191
| 0
| 0
| 0.141892
| 0.108108
| 0
| 0
| 0
| 0
| 0.180851
| 1
| 0.053191
| false
| 0
| 0.053191
| 0
| 0.106383
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d2491b9e7e51f282d5d9e142522bd76cfe79d2c
| 83
|
py
|
Python
|
transcription/alex_da-default/ml/exceptions.py
|
UFAL-DSG/django-crowdflower-annotations
|
76f5e35dc3029030b73a7bebd54e0f46474958c1
|
[
"Apache-2.0"
] | 11
|
2015-05-22T08:07:05.000Z
|
2019-11-13T12:29:52.000Z
|
transcription/alex_da-default/ml/exceptions.py
|
UFAL-DSG/django-crowdflower-annotations
|
76f5e35dc3029030b73a7bebd54e0f46474958c1
|
[
"Apache-2.0"
] | null | null | null |
transcription/alex_da-default/ml/exceptions.py
|
UFAL-DSG/django-crowdflower-annotations
|
76f5e35dc3029030b73a7bebd54e0f46474958c1
|
[
"Apache-2.0"
] | null | null | null |
from alex_da import AlexException
class NBListException(AlexException):
pass
| 13.833333
| 37
| 0.807229
| 9
| 83
| 7.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 83
| 5
| 38
| 16.6
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7d4ddd1d1e30f1760d2abcb8cc1351b76275d353
| 829
|
py
|
Python
|
relfs/relfs/fuse/symlink.py
|
matus-chochlik/various
|
2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b
|
[
"MIT"
] | 1
|
2020-10-25T12:28:50.000Z
|
2020-10-25T12:28:50.000Z
|
relfs/relfs/fuse/symlink.py
|
matus-chochlik/various
|
2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b
|
[
"MIT"
] | null | null | null |
relfs/relfs/fuse/symlink.py
|
matus-chochlik/various
|
2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#------------------------------------------------------------------------------#
import os
import time
import fuse
import errno
from .item import RelFuseItem
#------------------------------------------------------------------------------#
class Symlink(RelFuseItem):
# --------------------------------------------------------------------------
def __init__(self, path_getter):
RelFuseItem.__init__(self)
self._path_getter = path_getter
# --------------------------------------------------------------------------
def _get_mode(self):
return 0o120440
# --------------------------------------------------------------------------
def readlink(self):
return self._path_getter()
#------------------------------------------------------------------------------#
| 34.541667
| 80
| 0.289505
| 44
| 829
| 5.090909
| 0.522727
| 0.178571
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010959
| 0.119421
| 829
| 23
| 81
| 36.043478
| 0.29589
| 0.568154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.384615
| 0.153846
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
70c628eb0e5f52cf95f887e936a852a57371a48a
| 6,297
|
py
|
Python
|
items.py
|
rullmann/bundlewrap-nfs-server
|
e2392c80c5c687d5f0306625dca8943164396953
|
[
"MIT"
] | null | null | null |
items.py
|
rullmann/bundlewrap-nfs-server
|
e2392c80c5c687d5f0306625dca8943164396953
|
[
"MIT"
] | null | null | null |
items.py
|
rullmann/bundlewrap-nfs-server
|
e2392c80c5c687d5f0306625dca8943164396953
|
[
"MIT"
] | null | null | null |
pkg_dnf = {
'nfs-utils': {},
'libnfsidmap': {},
}
svc_systemd = {
'nfs-server': {
'needs': ['pkg_dnf:nfs-utils'],
},
'rpcbind': {
'needs': ['pkg_dnf:nfs-utils'],
},
'rpc-statd': {
'needs': ['pkg_dnf:nfs-utils'],
},
'nfs-idmapd': {
'needs': ['pkg_dnf:nfs-utils'],
},
}
files = {}
actions = {
'nfs_export': {
'command': 'exportfs -a',
'triggered': True,
'needs': ['pkg_dnf:nfs-utils'],
},
}
for export in node.metadata['nfs-server']['exports']:
files['/etc/exports.d/{}'.format(export['alias'])] = {
'source': 'template',
'mode': '0644',
'content_type': 'mako',
'context': {
'export': export,
},
'needs': ['pkg_dnf:nfs-utils'],
'triggers': ['action:nfs_export', 'svc_systemd:nfs-server:restart', 'svc_systemd:rpcbind:restart'],
}
if node.has_bundle('firewalld'):
if node.metadata.get('nfs-server', {}).get('firewalld_permitted_zones'):
for zone in node.metadata.get('nfs-server', {}).get('firewalld_permitted_zones'):
actions['firewalld_add_nfs_zone_{}'.format(zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=nfs'.format(zone),
'unless': 'firewall-cmd --zone={} --list-services | grep nfs'.format(zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_mountd_zone_{}'.format(zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=mountd'.format(zone),
'unless': 'firewall-cmd --zone={} --list-services | grep mountd'.format(zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_rpc-bind_zone_{}'.format(zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=rpc-bind'.format(zone),
'unless': 'firewall-cmd --zone={} --list-services | grep rpc-bind'.format(zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('default_zone'):
default_zone = node.metadata.get('firewalld', {}).get('default_zone')
actions['firewalld_add_nfs_zone_{}'.format(default_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=nfs'.format(default_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep nfs'.format(default_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_mountd_zone_{}'.format(default_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=mountd'.format(default_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep mountd'.format(default_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_rpc-bind_zone_{}'.format(default_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=rpc-bind'.format(default_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep rpc-bind'.format(default_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('custom_zones', False):
for interface in node.metadata['interfaces']:
custom_zone = node.metadata.get('interfaces', {}).get(interface).get('firewalld_zone')
actions['firewalld_add_nfs_zone_{}'.format(custom_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=nfs'.format(custom_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep nfs'.format(custom_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_mountd_zone_{}'.format(custom_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=mountd'.format(custom_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep mountd'.format(custom_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_rpc-bind_zone_{}'.format(custom_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-service=rpc-bind'.format(custom_zone),
'unless': 'firewall-cmd --zone={} --list-services | grep rpc-bind'.format(custom_zone),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
else:
actions['firewalld_add_nfs'] = {
'command': 'firewall-cmd --permanent --add-service=nfs',
'unless': 'firewall-cmd --list-services | grep nfs',
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_mountd'] = {
'command': 'firewall-cmd --permanent --add-service=mountd',
'unless': 'firewall-cmd --list-services | grep mountd',
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
actions['firewalld_add_rpc-bind'] = {
'command': 'firewall-cmd --permanent --add-service=rpc-bind',
'unless': 'firewall-cmd --list-services | grep rpc-bind',
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
| 46.301471
| 107
| 0.550103
| 621
| 6,297
| 5.384863
| 0.112721
| 0.078947
| 0.059211
| 0.09689
| 0.842404
| 0.80622
| 0.736842
| 0.707835
| 0.707835
| 0.677931
| 0
| 0.000866
| 0.266635
| 6,297
| 135
| 108
| 46.644444
| 0.723257
| 0
| 0
| 0.323077
| 0
| 0
| 0.481658
| 0.117834
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
70f6376afcc4f51fb81d91a0923b3454f9a70bc9
| 49
|
py
|
Python
|
redditbot/bot/__init__.py
|
aatrubilin/subredditbot
|
adb761aab9e7c7fe075de8815cf46a4feb7aef4c
|
[
"MIT"
] | null | null | null |
redditbot/bot/__init__.py
|
aatrubilin/subredditbot
|
adb761aab9e7c7fe075de8815cf46a4feb7aef4c
|
[
"MIT"
] | null | null | null |
redditbot/bot/__init__.py
|
aatrubilin/subredditbot
|
adb761aab9e7c7fe075de8815cf46a4feb7aef4c
|
[
"MIT"
] | null | null | null |
from .mq_bot import MQBot
from . import handlers
| 16.333333
| 25
| 0.795918
| 8
| 49
| 4.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 2
| 26
| 24.5
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
70f9c76bd4d2170b44299e71149c19c36523b167
| 85
|
py
|
Python
|
src/helpers/__init__.py
|
micro-infrastructure/adaptor-srm2local
|
92c66753262f405c8466b5d37de305e6332859a2
|
[
"MIT"
] | 1
|
2020-01-17T09:20:01.000Z
|
2020-01-17T09:20:01.000Z
|
Services/core-xyz/src/helpers/__init__.py
|
recap/MicroInfrastructure
|
4e8baf6d2a29344b10d6d3d57d01fc24fef16342
|
[
"MIT"
] | null | null | null |
Services/core-xyz/src/helpers/__init__.py
|
recap/MicroInfrastructure
|
4e8baf6d2a29344b10d6d3d57d01fc24fef16342
|
[
"MIT"
] | null | null | null |
from helpers.b64 import base64_dict, base64_str
from helpers.json import json_respone
| 42.5
| 47
| 0.870588
| 14
| 85
| 5.071429
| 0.642857
| 0.309859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 0.094118
| 85
| 2
| 48
| 42.5
| 0.844156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb3ac979514b4a3a688c2f38628a76682af0f174
| 27
|
py
|
Python
|
hugo2lunr/__init__.py
|
IMTorgDemo/Hugo2lunr
|
402f2c1b6604d03690041e6785f0feea303fa31d
|
[
"BSD-2-Clause"
] | null | null | null |
hugo2lunr/__init__.py
|
IMTorgDemo/Hugo2lunr
|
402f2c1b6604d03690041e6785f0feea303fa31d
|
[
"BSD-2-Clause"
] | null | null | null |
hugo2lunr/__init__.py
|
IMTorgDemo/Hugo2lunr
|
402f2c1b6604d03690041e6785f0feea303fa31d
|
[
"BSD-2-Clause"
] | null | null | null |
from .hugo2lunr import main
| 27
| 27
| 0.851852
| 4
| 27
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.111111
| 27
| 1
| 27
| 27
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb764501b239d35138334de98c67a64b17005c0b
| 124
|
py
|
Python
|
mxmrt/unittest/main.py
|
KunyFox/MRTv2
|
7b7a156be6f99082964227babd9e157708255b2c
|
[
"Apache-2.0"
] | 6
|
2019-07-04T09:42:53.000Z
|
2021-12-28T13:19:48.000Z
|
mxmrt/unittest/main.py
|
KunyFox/MRTv2
|
7b7a156be6f99082964227babd9e157708255b2c
|
[
"Apache-2.0"
] | 4
|
2019-06-27T08:05:18.000Z
|
2021-09-09T18:59:11.000Z
|
cvm/unittest/main.py
|
CortexFoundation/tvm-cvm
|
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
|
[
"Apache-2.0"
] | null | null | null |
import sys
from ops import *
from passes import *
if __name__ == "__main__":
unittest.main(argv=sys.argv, verbosity=5)
| 17.714286
| 45
| 0.717742
| 18
| 124
| 4.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.169355
| 124
| 6
| 46
| 20.666667
| 0.776699
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1dc9a61f70b05ce0822d3c8c9bb91ca0413eed53
| 38
|
py
|
Python
|
flask_server/__init__.py
|
LeiQiao/Parasite-Plugins
|
96a20819f2cf625f22e06be9dc03a997291e1fc6
|
[
"MIT"
] | null | null | null |
flask_server/__init__.py
|
LeiQiao/Parasite-Plugins
|
96a20819f2cf625f22e06be9dc03a997291e1fc6
|
[
"MIT"
] | null | null | null |
flask_server/__init__.py
|
LeiQiao/Parasite-Plugins
|
96a20819f2cf625f22e06be9dc03a997291e1fc6
|
[
"MIT"
] | null | null | null |
from .flask_server import FlaskServer
| 19
| 37
| 0.868421
| 5
| 38
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3831e61b84da801229f7e8e0186b751bd826f3e7
| 1,476
|
py
|
Python
|
Python_Basics/04_Nested_Conditional_Statements/07_Trade_Comissions.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
Python_Basics/04_Nested_Conditional_Statements/07_Trade_Comissions.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
Python_Basics/04_Nested_Conditional_Statements/07_Trade_Comissions.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
town = input().lower()
sell_count = float(input())
if 0<=sell_count<=500:
if town == "sofia":
comission = sell_count*0.05
print(f'{comission:.2f}')
elif town=="varna":
comission = sell_count * 0.045
print(f'{comission:.2f}')
elif town == "plovdiv":
comission = sell_count * 0.055
print(f'{comission:.2f}')
else:
print('error')
elif 500<sell_count<=1000:
if town == "sofia":
comission = sell_count*0.07
print(f'{comission:.2f}')
elif town=="varna":
comission = sell_count * 0.075
print(f'{comission:.2f}')
elif town == "plovdiv":
comission = sell_count * 0.08
print(f'{comission:.2f}')
else:
print('error')
elif 1000<sell_count<=10000:
if town == "sofia":
comission = sell_count*0.08
print(f'{comission:.2f}')
elif town=="varna":
comission = sell_count * 0.10
print(f'{comission:.2f}')
elif town == "plovdiv":
comission = sell_count * 0.12
print(f'{comission:.2f}')
else:
print('error')
elif 10000 < sell_count:
if town == "sofia":
comission = sell_count*0.12
print(f'{comission:.2f}')
elif town=="varna":
comission = sell_count * 0.13
print(f'{comission:.2f}')
elif town == "plovdiv":
comission = sell_count * 0.145
print(f'{comission:.2f}')
else:
print('error')
else:
print('error')
| 27.849057
| 38
| 0.549458
| 185
| 1,476
| 4.291892
| 0.167568
| 0.192695
| 0.27204
| 0.287154
| 0.836272
| 0.836272
| 0.836272
| 0.693955
| 0.604534
| 0.503778
| 0
| 0.073055
| 0.285908
| 1,476
| 53
| 39
| 27.849057
| 0.680266
| 0
| 0
| 0.730769
| 0
| 0
| 0.184834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.326923
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6997766165f57ea6bc42b80127429fbbb1ee8145
| 1,330
|
py
|
Python
|
client/swagger_client/models/__init__.py
|
kakwa/certascale
|
0df8da0f518506500117152fd0e28ee3286949af
|
[
"MIT"
] | null | null | null |
client/swagger_client/models/__init__.py
|
kakwa/certascale
|
0df8da0f518506500117152fd0e28ee3286949af
|
[
"MIT"
] | null | null | null |
client/swagger_client/models/__init__.py
|
kakwa/certascale
|
0df8da0f518506500117152fd0e28ee3286949af
|
[
"MIT"
] | 2
|
2020-11-04T03:07:00.000Z
|
2020-11-05T08:14:33.000Z
|
# coding: utf-8
# flake8: noqa
"""
certascale API
Certascale API documentation # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.account_create_update import AccountCreateUpdate
from swagger_client.models.account_definition import AccountDefinition
from swagger_client.models.account_definition_list import AccountDefinitionList
from swagger_client.models.api_key import ApiKey
from swagger_client.models.api_key_list import ApiKeyList
from swagger_client.models.certificate import Certificate
from swagger_client.models.certificate_list import CertificateList
from swagger_client.models.certificate_payload import CertificatePayload
from swagger_client.models.default_error import DefaultError
from swagger_client.models.default_message import DefaultMessage
from swagger_client.models.domain import Domain
from swagger_client.models.domain_create_update import DomainCreateUpdate
from swagger_client.models.domain_list import DomainList
from swagger_client.models.notification import Notification
from swagger_client.models.notification_list import NotificationList
from swagger_client.models.notification_update import NotificationUpdate
| 39.117647
| 79
| 0.861654
| 167
| 1,330
| 6.634731
| 0.341317
| 0.158845
| 0.245487
| 0.33213
| 0.471119
| 0.124549
| 0
| 0
| 0
| 0
| 0
| 0.006639
| 0.093985
| 1,330
| 33
| 80
| 40.30303
| 0.912863
| 0.160902
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
69b6d586758415b51bd6222616a7281a5a3b5a77
| 32
|
py
|
Python
|
reth/reth/algorithm/pg/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
reth/reth/algorithm/pg/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | 1
|
2021-08-10T02:58:58.000Z
|
2021-08-10T02:58:58.000Z
|
reth/reth/algorithm/pg/__init__.py
|
sosp2021/reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
from .pg_solver import PGSolver
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
38ddc03eb323bf6a2e78b80ee5d4426bd997d286
| 269
|
py
|
Python
|
snmpagent_unity/unity_impl/VolumeRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 2
|
2019-03-01T11:14:59.000Z
|
2019-10-02T17:47:59.000Z
|
snmpagent_unity/unity_impl/VolumeRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 2
|
2019-03-01T11:26:29.000Z
|
2019-10-11T18:56:54.000Z
|
snmpagent_unity/unity_impl/VolumeRaidLevels.py
|
factioninc/snmp-unity-agent
|
3525dc0fac60d1c784dcdd7c41693544bcbef843
|
[
"Apache-2.0"
] | 1
|
2019-10-03T21:09:17.000Z
|
2019-10-03T21:09:17.000Z
|
class VolumeRaidLevels(object):
def read_get(self, name, idx_name, unity_client):
return unity_client.get_lun_raid_type(idx_name)
class VolumeRaidLevelsColumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_luns()
| 29.888889
| 55
| 0.747212
| 37
| 269
| 5.108108
| 0.459459
| 0.232804
| 0.116402
| 0.232804
| 0.328042
| 0.328042
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163569
| 269
| 8
| 56
| 33.625
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2a1e1cef18774e1b320c6420c1ddf3be36316c6b
| 18,009
|
py
|
Python
|
swiss_army_keras/_model_deeplab_v3_plus.py
|
waterviewsrl/swiss-army-keras
|
49578f1a45761229756a8adbfcf692728039dc3b
|
[
"MIT"
] | 1
|
2022-02-23T13:54:22.000Z
|
2022-02-23T13:54:22.000Z
|
swiss_army_keras/_model_deeplab_v3_plus.py
|
waterviewsrl/swiss-army-keras
|
49578f1a45761229756a8adbfcf692728039dc3b
|
[
"MIT"
] | null | null | null |
swiss_army_keras/_model_deeplab_v3_plus.py
|
waterviewsrl/swiss-army-keras
|
49578f1a45761229756a8adbfcf692728039dc3b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from multiprocessing.spawn import prepare
from sklearn import preprocessing
from swiss_army_keras.layer_utils import *
from swiss_army_keras.activations import GELU, Snake
from swiss_army_keras._backbone_zoo import backbone_zoo, bach_norm_checker
from swiss_army_keras._model_unet_2d import UNET_left, UNET_right
from tensorflow.keras.layers import Input, BatchNormalization, Conv2D, AveragePooling2D, UpSampling2D, Concatenate, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import HeNormal
from swiss_army_keras.utils import freeze_model
from tensorflow.nn import relu
import tensorflow as tf
shallow_resize_map = {0: 1, 1: 2, 2: 4, 3: 8, 4: 16, 5: 32}
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
):
x = Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding="same",
use_bias=use_bias,
kernel_initializer=HeNormal(),
)(block_input)
x = BatchNormalization()(x)
return relu(x)
def depth_convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
stride=1,
depth_padding='same',
epsilon=1e-5
):
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(dilation_rate, dilation_rate),
padding=depth_padding, use_bias=False, name=f'depthwise_{dilation_rate}')(block_input)
x = BatchNormalization(
name=f'depthwise_BN__{dilation_rate}', epsilon=epsilon)(x)
x = Activation(relu)(x)
x = Conv2D(num_filters, (1, 1), padding='same',
use_bias=False, name=f'pointwise_{dilation_rate}')(x)
x = BatchNormalization(
name=f'pointwise_BN_{dilation_rate}', epsilon=epsilon)(x)
x = Activation(relu)(x)
return x
def DilatedSpatialPyramidPooling(dspp_input, atrous_rates, num_filters):
dims = dspp_input.shape
x = AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear",
)(x)
out_1 = convolution_block(
dspp_input, kernel_size=1, dilation_rate=1, num_filters=num_filters)
outputs = [out_pool, out_1]
for rate in atrous_rates:
out = depth_convolution_block(
dspp_input, kernel_size=3, dilation_rate=rate, num_filters=num_filters)
outputs.append(out)
x = Concatenate(axis=-1)(outputs)
output = convolution_block(x, kernel_size=1, num_filters=num_filters)
return output
def deeplab_v3_plus(input_tensor, n_labels, filter_num_down=[64, 128, 256, 512, 1024],
deep_layer=5, shallow_layer=2, num_filters_deep=256, num_filters_shallow=48, multiscale_factor=0, atrous_rates=[6, 12, 18],
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='deeplab_v3_plus'):
'''
The base of UNET 3+ with an optional ImagNet-trained backbone.
unet_3plus_2d_base(input_tensor, filter_num_down, filter_num_skip, filter_num_aggregate,
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='deeplab_v3_plus')
----------
Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y.W. and Wu, J., 2020.
UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation.
In ICASSP 2020-2020 IEEE International Conference on Acoustics,
Speech and Signal Processing (ICASSP) (pp. 1055-1059). IEEE.
Input
----------
input_tensor: the input tensor of the base, e.g., `keras.layers.Inpyt((None, None, 3))`.
filter_num_down: a list that defines the number of filters for each
downsampling level. e.g., `[64, 128, 256, 512, 1024]`.
the network depth is expected as `len(filter_num_down)`
filter_num_skip: a list that defines the number of filters after each
full-scale skip connection. Number of elements is expected to be `depth-1`.
i.e., the bottom level is not included.
* Huang et al. (2020) applied the same numbers for all levels.
e.g., `[64, 64, 64, 64]`.
filter_num_aggregate: an int that defines the number of channels of full-scale aggregations.
stack_num_down: number of convolutional layers per downsampling level/block.
stack_num_up: number of convolutional layers (after full-scale concat) per upsampling level/block.
activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., ReLU
batch_norm: True for batch normalization.
pool: True or 'max' for MaxPooling2D.
'ave' for AveragePooling2D.
False for strided conv + batch norm + activation.
unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.
'nearest' for Upsampling2D with nearest interpolation.
False for Conv2DTranspose + batch norm + activation.
name: prefix of the created keras model and its layers.
---------- (keywords of backbone options) ----------
backbone_name: the bakcbone model name. Should be one of the `tensorflow.keras.applications` class.
None (default) means no backbone.
Currently supported backbones are:
(1) VGG16, VGG19
(2) ResNet50, ResNet101, ResNet152
(3) ResNet50V2, ResNet101V2, ResNet152V2
(4) DenseNet121, DenseNet169, DenseNet201
(5) EfficientNetB[0-7]
weights: one of None (random initialization), 'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
freeze_backbone: True for a frozen backbone.
freeze_batch_norm: False for not freezing batch normalization layers.
* Downsampling is achieved through maxpooling and can be replaced by strided convolutional layers here.
* Upsampling is achieved through bilinear interpolation and can be replaced by transpose convolutional layers here.
Output
----------
A list of tensors with the first/second/third tensor obtained from
the deepest/second deepest/third deepest upsampling block, etc.
* The feature map sizes of these tensors are different,
with the first tensor has the smallest size.
'''
depth_ = len(filter_num_down)
X_encoder = []
multiscale_resizing = None
X_encoder_small = None
# no backbone cases
if backbone is None:
X = input_tensor
# stacked conv2d before downsampling
X = CONV_stack(X, filter_num_down[0], kernel_size=3, stack_num=stack_num_down,
activation=activation, batch_norm=batch_norm, name='{}_down0'.format(name))
X_encoder.append(X)
# downsampling levels
for i, f in enumerate(filter_num_down[1:]):
# UNET-like downsampling
X = UNET_left(X, f, kernel_size=3, stack_num=stack_num_down, activation=activation,
pool=pool, batch_norm=batch_norm, name='{}_down{}'.format(name, i+1))
X_encoder.append(X)
preprocessing = dummy_preprocessing
else:
# handling VGG16 and VGG19 separately
if 'VGG' in backbone:
backbone_ = backbone_zoo(
backbone, weights, input_tensor, depth_, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor, ])
depth_encode = len(X_encoder)
preprocessing = backbone_.preprocessing
# for other backbones
else:
if multiscale_factor != 0:
multiscale_resizing = tf.keras.layers.Resizing(int(
input_tensor.shape[1]/multiscale_factor), int(input_tensor.shape[2]/multiscale_factor))(input_tensor)
backbone_small_, _ = backbone_zoo(
'MobileNetV3Large', weights, multiscale_resizing, deep_layer, freeze_backbone, freeze_batch_norm, return_outputs=True)
print(backbone_small_[deep_layer-1])
backbone, preprocessing = backbone_zoo(
backbone, weights, input_tensor, deep_layer, freeze_backbone, freeze_batch_norm, return_outputs=True)
X_encoder = backbone[deep_layer-1]
X_encoder_shallow = backbone[shallow_layer-1]
# X_encoder_small = backbone_small_[deep
#X_encoder_small = backbone_small_[deep_layer-1]([multiscale_resizing, ])
else:
backbone_ = backbone_zoo(
backbone, weights, input_tensor, deep_layer, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor, ])
preprocessing = backbone_.preprocessing
depth_encode = len(X_encoder) + 1
if multiscale_factor != 0:
X_encoder_back = tf.keras.layers.Resizing(
X_encoder.shape[1], X_encoder.shape[2])(backbone_small_[deep_layer-1])
#X_encoder_back = X_encoder[deep_layer-1]
x = Concatenate(axis=-1)([X_encoder, X_encoder_back])
#x = X_encoder_back
print(X_encoder)
print(X_encoder_back)
print(x)
x = Model([input_tensor, ], [x, ])
if freeze_backbone:
x = freeze_model(x, freeze_batch_norm=freeze_batch_norm)
x = x([input_tensor, ])
else:
x = X_encoder[deep_layer-1]
x = DilatedSpatialPyramidPooling(
x, atrous_rates, num_filters=num_filters_deep)
input_a = UpSampling2D(
size=(input_tensor.shape[1] // shallow_resize_map[shallow_layer] // x.shape[1],
input_tensor.shape[2] // shallow_resize_map[shallow_layer] // x.shape[2]),
interpolation="bilinear",
)(x)
if multiscale_factor != 0:
input_b = X_encoder_shallow
else:
input_b = X_encoder[shallow_layer-1]
input_b = convolution_block(
input_b, num_filters=num_filters_shallow, kernel_size=1)
x = Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = UpSampling2D(
size=(input_tensor.shape[1] // x.shape[1],
input_tensor.shape[2] // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = Conv2D(n_labels, kernel_size=(1, 1), padding="same")(x)
m = Model([input_tensor, ], [model_output, ])
m.preprocessing = preprocessing
return m
def deeplab_v3_plus_lite(input_tensor, n_labels, filter_num_down=[64, 128, 256, 512, 1024],
deep_layer=5, shallow_layer=2, atrous_rates=[6, 12, 18],
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='deeplab_v3_plus_lite'):
'''
The base of UNET 3+ with an optional ImagNet-trained backbone.
unet_3plus_2d_base(input_tensor, filter_num_down, filter_num_skip, filter_num_aggregate,
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='unet3plus')
----------
Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y.W. and Wu, J., 2020.
UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation.
In ICASSP 2020-2020 IEEE International Conference on Acoustics,
Speech and Signal Processing (ICASSP) (pp. 1055-1059). IEEE.
Input
----------
input_tensor: the input tensor of the base, e.g., `keras.layers.Inpyt((None, None, 3))`.
filter_num_down: a list that defines the number of filters for each
downsampling level. e.g., `[64, 128, 256, 512, 1024]`.
the network depth is expected as `len(filter_num_down)`
filter_num_skip: a list that defines the number of filters after each
full-scale skip connection. Number of elements is expected to be `depth-1`.
i.e., the bottom level is not included.
* Huang et al. (2020) applied the same numbers for all levels.
e.g., `[64, 64, 64, 64]`.
filter_num_aggregate: an int that defines the number of channels of full-scale aggregations.
stack_num_down: number of convolutional layers per downsampling level/block.
stack_num_up: number of convolutional layers (after full-scale concat) per upsampling level/block.
activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., ReLU
batch_norm: True for batch normalization.
pool: True or 'max' for MaxPooling2D.
'ave' for AveragePooling2D.
False for strided conv + batch norm + activation.
unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.
'nearest' for Upsampling2D with nearest interpolation.
False for Conv2DTranspose + batch norm + activation.
name: prefix of the created keras model and its layers.
---------- (keywords of backbone options) ----------
backbone_name: the bakcbone model name. Should be one of the `tensorflow.keras.applications` class.
None (default) means no backbone.
Currently supported backbones are:
(1) VGG16, VGG19
(2) ResNet50, ResNet101, ResNet152
(3) ResNet50V2, ResNet101V2, ResNet152V2
(4) DenseNet121, DenseNet169, DenseNet201
(5) EfficientNetB[0-7]
weights: one of None (random initialization), 'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
freeze_backbone: True for a frozen backbone.
freeze_batch_norm: False for not freezing batch normalization layers.
* Downsampling is achieved through maxpooling and can be replaced by strided convolutional layers here.
* Upsampling is achieved through bilinear interpolation and can be replaced by transpose convolutional layers here.
Output
----------
A list of tensors with the first/second/third tensor obtained from
the deepest/second deepest/third deepest upsampling block, etc.
* The feature map sizes of these tensors are different,
with the first tensor has the smallest size.
'''
depth_ = len(filter_num_down)
X_encoder = []
# no backbone cases
if backbone is None:
X = input_tensor
# stacked conv2d before downsampling
X = CONV_stack(X, filter_num_down[0], kernel_size=3, stack_num=stack_num_down,
activation=activation, batch_norm=batch_norm, name='{}_down0'.format(name))
X_encoder.append(X)
# downsampling levels
for i, f in enumerate(filter_num_down[1:]):
# UNET-like downsampling
X = UNET_left(X, f, kernel_size=3, stack_num=stack_num_down, activation=activation,
pool=pool, batch_norm=batch_norm, name='{}_down{}'.format(name, i+1))
X_encoder.append(X)
preprocessing = dummy_preprocessing
else:
# handling VGG16 and VGG19 separately
if 'VGG' in backbone:
backbone_ = backbone_zoo(
backbone, weights, input_tensor, depth_, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor, ])
depth_encode = len(X_encoder)
preprocessing = backbone_.preprocessing
# for other backbones
else:
backbone_ = backbone_zoo(
backbone, weights, input_tensor, deep_layer, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor, ])
depth_encode = len(X_encoder) + 1
preprocessing = backbone_.preprocessing
x = DilatedSpatialPyramidPooling(X_encoder[deep_layer-1], atrous_rates)
print(input_tensor)
input_a = UpSampling2D(
size=(input_tensor.shape[1] // shallow_resize_map[shallow_layer] // x.shape[1],
input_tensor.shape[2] // shallow_resize_map[shallow_layer] // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = X_encoder[shallow_layer-1]
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
"""x = UpSampling2D(
size=(input_tensor.shape[1] // x.shape[1],
input_tensor.shape[2] // x.shape[2]),
interpolation="bilinear",
)(x)"""
model_output = Conv2D(n_labels, kernel_size=(1, 1), padding="same")(x)
m = Model([input_tensor, ], [model_output, ])
m.preprocessing = preprocessing
return m
| 43.711165
| 143
| 0.640291
| 2,235
| 18,009
| 4.944519
| 0.138255
| 0.034838
| 0.016469
| 0.01665
| 0.80599
| 0.775133
| 0.75052
| 0.747806
| 0.747806
| 0.747806
| 0
| 0.029173
| 0.2672
| 18,009
| 411
| 144
| 43.817518
| 0.808214
| 0.431173
| 0
| 0.540816
| 0
| 0
| 0.029357
| 0.011139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02551
| false
| 0
| 0.066327
| 0
| 0.117347
| 0.02551
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aa83178a1f6aee51f60c3a5725f54dff28ef952f
| 7,540
|
py
|
Python
|
src/marketing/models.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
src/marketing/models.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
src/marketing/models.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_save
from .utils import Mailchimp
class MarketingPreference(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
subscribed = models.BooleanField(default=True)
mailchimp_subscribed = models.NullBooleanField(blank=True)
mailchimp_msg = models.TextField(null=True, blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.email
#########################
def marketing_pref_create_receiver(sender, instance, created, *args, **kwargs):
print("marketing_pref_create_receiver")
print("instance", instance)
if created:
status_code, response_data = Mailchimp().subscribe(instance.user.email)
# Mailchimp().add_email(instance.user.email)
# response_data = Mailchimp().subscribe(instance.user.email)
print("status_code", status_code, "response_data", response_data)
post_save.connect(marketing_pref_create_receiver, sender=MarketingPreference)
##################3
def marketing_pref_update_receiver(sender, instance, *args, **kwargs):
print("marketing_pref_update_receiver")
print("instance", instance)
if instance.subscribed != instance.mailchimp_subscribed:
# subscribing user
if instance.subscribed:
status_code, response_data = Mailchimp().subscribe(instance.user.email)
print("+++", status_code, " *** ", response_data)
# unsubscribing user
else:
status_code, response_data = Mailchimp().unsubscribe(instance.user.email)
print("+++", status_code, " *** ", response_data)
if response_data['status'] == 'subscribed':
instance.subscribed = True
instance.mailchimp_subscribed = True
instance.mailchimp_msg = response_data
print("+++", status_code, " *** ", response_data)
else:
instance.subscribe = False
instance.mailchimp_subscribed = False
instance.mailchimp_msg = response_data
print("+++", status_code, " *** ", response_data)
pre_save.connect(marketing_pref_update_receiver, sender=MarketingPreference)
##############3
def make_marketing_pref_receiver(sender, instance, created, *args, **kwargs):
print("make_marketing_pref_receiver")
'''
User model, when I create a user, I also create a marketing
'''
if created:
print("make_marketing_pref_receiver instance", instance)
MarketingPreference.objects.get_or_create(user=instance)
post_save.connect(make_marketing_pref_receiver, sender=settings.AUTH_USER_MODEL)
'''
def marketing_pref_create_receiver(sender, instance, created, *args, **kwargs):
if created:
status_code, response_data = Mailchimp().subscribe(instance.user.email)
print(status_code, response_data)
post_save.connect(marketing_pref_create_receiver, sender=MarketingPreference)
def marketing_pref_update_receiver(sender, instance, *args, **kwargs):
if instance.subscribed != instance.mailchimp_subscribed:
if instance.subscribed:
# subscribing user
status_code, response_data = Mailchimp().subscribe(instance.user.email)
else:
# unsubscribing user
status_code, response_data = Mailchimp().unsubscribe(instance.user.email)
if response_data['status'] == 'subscribed':
instance.subscribed = True
instance.mailchimp_subscribed = True
instance.mailchimp_msg = response_data
else:
instance.subscribed = False
instance.mailchimp_subscribed = False
instance.mailchimp_msg = response_data
pre_save.connect(marketing_pref_update_receiver, sender=MarketingPreference)
def make_marketing_pref_receiver(sender, instance, created, *args, **kwargs):
User model
if created:
MarketingPreference.objects.get_or_create(user=instance)
post_save.connect(make_marketing_pref_receiver, sender=settings.AUTH_USER_MODEL)
'''
#
#
# def marketing_pref_create_receiver(sender, instance, created, *args, **kwargs):
# if created:
# status_code, response_data = Mailchimp().subscribe(instance.user.email)
# print(status_code, response_data)
#
#
# post_save.connect(marketing_pref_create_receiver, sender=MarketingPreference)
#
# def marketing_pref_update_receiver(sender, instance, *args, **kwargs):
# if instance.subscribed != instance.mailchimp_subscribed:
# if instance.subscribed:
# # subscribing user
# status_code, response_data = Mailchimp().subscribe(instance.user.email)
# else:
# # unsubscribing user
# status_code, response_data = Mailchimp().unsubscribe(instance.user.email)
#
# if response_data['status'] == 'subscribed':
# instance.subscribed = True
# instance.mailchimp_subscribed = True
# instance.mailchimp_msg = response_data
# else:
# instance.subscribed = False
# instance.mailchimp_subscribed = False
# instance.mailchimp_msg = response_data
#
# pre_save.connect(marketing_pref_update_receiver, sender=MarketingPreference)
#
#
#
#
# from django.conf import settings
# from django.db import models
# from django.db.models.signals import post_save, pre_save
#
# # Create your models here.
#
# from .utils import Mailchimp
#
# class MarketingPreference(models.Model):
# user = models.OneToOneField(settings.AUTH_USER_MODEL)
# subscribed = models.BooleanField(default=True)
# mailchimp_subscribed = models.NullBooleanField(blank=True)
# mailchimp_msg = models.TextField(null=True, blank=True)
# timestamp = models.DateTimeField(auto_now_add=True)
# update = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.user.email
#
# if instance.subscribed != instance.mailchimp_subscribed:
# if instance.subscribed:
# # subscribe User
# status_code, response_data = Mailchimp().subscribe(instance.user.email)
# else:
# # unsubscribe user
# status_code, response_data = Mailchimp().unsubscribe(instance.user.email)
#
# if response_data['status'] == 'subscribed':
# instance.subscribed = True
# instance.mailchimp_subscribed = True
# instance.mailchimp_msg = response_data
# else:
# instance.subscribed = False
# instance.mailchimp_subscribed = False
# instance.mailchimp_msg = response_data
# def make_marketing_pref_receiver(sender, instance, created, *args, **kwargs):
# """
# user model
# """
# if created:
# MarketingPreference.objects.get_or_create(user=instance)
#
# post_save.connect(make_marketing_pref_receiver, sender=settings.AUTH_USER_MODEL)
#
# def marketing_pref_create_receiver(sender, instance, created, *args, **kwargs):
# if created:
# print(status_code, " " ,response_data)
# status_code, response_data = Mailchimp().subscribe(instance.user.email)
#
# post_save.connect(marketing_pref_create_receiver, sender=MarketingPreference)
| 36.601942
| 106
| 0.672546
| 774
| 7,540
| 6.297158
| 0.098191
| 0.083709
| 0.073861
| 0.090275
| 0.940501
| 0.905006
| 0.895363
| 0.893311
| 0.887772
| 0.8254
| 0
| 0.000341
| 0.222149
| 7,540
| 205
| 107
| 36.780488
| 0.830691
| 0.416844
| 0
| 0.297872
| 0
| 0
| 0.076895
| 0.041877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.085106
| 0.021277
| 0.340426
| 0.234043
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aa8ee35924e810f57b70a9053c8e175b2eb5b02a
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/virtualenv/run/__init__.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/virtualenv/run/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/virtualenv/run/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c9/1b/ba/2a853b246972839cc54dae756a260a22adfab54a61c47687ce649d8db5
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2a9bd8ffb99e7f94e5e4e24efff8989882144ace
| 41
|
py
|
Python
|
second/mayank_scripts/prac.py
|
mayanks888/second.pytorch
|
02d37885a543ee46516648dcab7db8f5d677a179
|
[
"MIT"
] | null | null | null |
second/mayank_scripts/prac.py
|
mayanks888/second.pytorch
|
02d37885a543ee46516648dcab7db8f5d677a179
|
[
"MIT"
] | null | null | null |
second/mayank_scripts/prac.py
|
mayanks888/second.pytorch
|
02d37885a543ee46516648dcab7db8f5d677a179
|
[
"MIT"
] | null | null | null |
# import mayavi.api
import open3d as o3d
| 13.666667
| 20
| 0.780488
| 7
| 41
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.170732
| 41
| 2
| 21
| 20.5
| 0.882353
| 0.414634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2aa0f838d83977d295045a182f65dbb931118fb0
| 19,151
|
py
|
Python
|
sdk/python/pulumi_azure_native/provider.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/provider.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/provider.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__, *,
auxiliary_tenant_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_pulumi_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_msi: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] auxiliary_tenant_ids: Any additional Tenant IDs which should be used for authentication.
:param pulumi.Input[str] client_certificate_password: The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate
:param pulumi.Input[str] client_certificate_path: The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.
:param pulumi.Input[str] client_id: The Client ID which should be used.
:param pulumi.Input[str] client_secret: The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.
:param pulumi.Input[bool] disable_pulumi_partner_id: This will disable the Pulumi Partner ID which is used if a custom `partnerId` isn't specified.
:param pulumi.Input[str] environment: The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to public.
:param pulumi.Input[str] msi_endpoint: The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically.
:param pulumi.Input[str] partner_id: A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
:param pulumi.Input[str] subscription_id: The Subscription ID which should be used.
:param pulumi.Input[str] tenant_id: The Tenant ID which should be used.
:param pulumi.Input[bool] use_msi: Allowed Managed Service Identity be used for Authentication.
"""
if auxiliary_tenant_ids is not None:
pulumi.set(__self__, "auxiliary_tenant_ids", auxiliary_tenant_ids)
if client_certificate_password is None:
client_certificate_password = _utilities.get_env('ARM_CLIENT_CERTIFICATE_PASSWORD')
if client_certificate_password is not None:
pulumi.set(__self__, "client_certificate_password", client_certificate_password)
if client_certificate_path is None:
client_certificate_path = _utilities.get_env('ARM_CLIENT_CERTIFICATE_PATH')
if client_certificate_path is not None:
pulumi.set(__self__, "client_certificate_path", client_certificate_path)
if client_id is None:
client_id = _utilities.get_env('ARM_CLIENT_ID')
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is None:
client_secret = _utilities.get_env('ARM_CLIENT_SECRET')
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if disable_pulumi_partner_id is None:
disable_pulumi_partner_id = _utilities.get_env_bool('ARM_DISABLE_PULUMI_PARTNER_ID')
if disable_pulumi_partner_id is not None:
pulumi.set(__self__, "disable_pulumi_partner_id", disable_pulumi_partner_id)
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
if environment is not None:
pulumi.set(__self__, "environment", environment)
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
if msi_endpoint is not None:
pulumi.set(__self__, "msi_endpoint", msi_endpoint)
if partner_id is None:
partner_id = _utilities.get_env('ARM_PARTNER_ID')
if partner_id is not None:
pulumi.set(__self__, "partner_id", partner_id)
if subscription_id is None:
subscription_id = _utilities.get_env('ARM_SUBSCRIPTION_ID')
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if tenant_id is None:
tenant_id = _utilities.get_env('ARM_TENANT_ID')
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
if use_msi is not None:
pulumi.set(__self__, "use_msi", use_msi)
@property
@pulumi.getter(name="auxiliaryTenantIds")
def auxiliary_tenant_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Any additional Tenant IDs which should be used for authentication.
"""
return pulumi.get(self, "auxiliary_tenant_ids")
@auxiliary_tenant_ids.setter
def auxiliary_tenant_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "auxiliary_tenant_ids", value)
@property
@pulumi.getter(name="clientCertificatePassword")
def client_certificate_password(self) -> Optional[pulumi.Input[str]]:
"""
The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate
"""
return pulumi.get(self, "client_certificate_password")
@client_certificate_password.setter
def client_certificate_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_password", value)
@property
@pulumi.getter(name="clientCertificatePath")
def client_certificate_path(self) -> Optional[pulumi.Input[str]]:
"""
The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.
"""
return pulumi.get(self, "client_certificate_path")
@client_certificate_path.setter
def client_certificate_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_path", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Client ID which should be used.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="disablePulumiPartnerId")
def disable_pulumi_partner_id(self) -> Optional[pulumi.Input[bool]]:
"""
This will disable the Pulumi Partner ID which is used if a custom `partnerId` isn't specified.
"""
return pulumi.get(self, "disable_pulumi_partner_id")
@disable_pulumi_partner_id.setter
def disable_pulumi_partner_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_pulumi_partner_id", value)
@property
@pulumi.getter
def environment(self) -> Optional[pulumi.Input[str]]:
"""
The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to public.
"""
return pulumi.get(self, "environment")
@environment.setter
def environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment", value)
@property
@pulumi.getter(name="msiEndpoint")
def msi_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically.
"""
return pulumi.get(self, "msi_endpoint")
@msi_endpoint.setter
def msi_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msi_endpoint", value)
@property
@pulumi.getter(name="partnerId")
def partner_id(self) -> Optional[pulumi.Input[str]]:
"""
A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
"""
return pulumi.get(self, "partner_id")
@partner_id.setter
def partner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The Subscription ID which should be used.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The Tenant ID which should be used.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="useMsi")
def use_msi(self) -> Optional[pulumi.Input[bool]]:
"""
Allowed Managed Service Identity be used for Authentication.
"""
return pulumi.get(self, "use_msi")
@use_msi.setter
def use_msi(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_msi", value)
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auxiliary_tenant_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_pulumi_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
The provider type for the native Azure package.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] auxiliary_tenant_ids: Any additional Tenant IDs which should be used for authentication.
:param pulumi.Input[str] client_certificate_password: The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate
:param pulumi.Input[str] client_certificate_path: The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.
:param pulumi.Input[str] client_id: The Client ID which should be used.
:param pulumi.Input[str] client_secret: The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.
:param pulumi.Input[bool] disable_pulumi_partner_id: This will disable the Pulumi Partner ID which is used if a custom `partnerId` isn't specified.
:param pulumi.Input[str] environment: The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to public.
:param pulumi.Input[str] msi_endpoint: The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically.
:param pulumi.Input[str] partner_id: A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
:param pulumi.Input[str] subscription_id: The Subscription ID which should be used.
:param pulumi.Input[str] tenant_id: The Tenant ID which should be used.
:param pulumi.Input[bool] use_msi: Allowed Managed Service Identity be used for Authentication.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProviderArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The provider type for the native Azure package.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auxiliary_tenant_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_pulumi_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["auxiliary_tenant_ids"] = pulumi.Output.from_input(auxiliary_tenant_ids).apply(pulumi.runtime.to_json) if auxiliary_tenant_ids is not None else None
if client_certificate_password is None:
client_certificate_password = _utilities.get_env('ARM_CLIENT_CERTIFICATE_PASSWORD')
__props__.__dict__["client_certificate_password"] = client_certificate_password
if client_certificate_path is None:
client_certificate_path = _utilities.get_env('ARM_CLIENT_CERTIFICATE_PATH')
__props__.__dict__["client_certificate_path"] = client_certificate_path
if client_id is None:
client_id = _utilities.get_env('ARM_CLIENT_ID')
__props__.__dict__["client_id"] = client_id
if client_secret is None:
client_secret = _utilities.get_env('ARM_CLIENT_SECRET')
__props__.__dict__["client_secret"] = client_secret
if disable_pulumi_partner_id is None:
disable_pulumi_partner_id = _utilities.get_env_bool('ARM_DISABLE_PULUMI_PARTNER_ID')
__props__.__dict__["disable_pulumi_partner_id"] = pulumi.Output.from_input(disable_pulumi_partner_id).apply(pulumi.runtime.to_json) if disable_pulumi_partner_id is not None else None
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
__props__.__dict__["environment"] = environment
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
__props__.__dict__["msi_endpoint"] = msi_endpoint
if partner_id is None:
partner_id = _utilities.get_env('ARM_PARTNER_ID')
__props__.__dict__["partner_id"] = partner_id
if subscription_id is None:
subscription_id = _utilities.get_env('ARM_SUBSCRIPTION_ID')
__props__.__dict__["subscription_id"] = subscription_id
if tenant_id is None:
tenant_id = _utilities.get_env('ARM_TENANT_ID')
__props__.__dict__["tenant_id"] = tenant_id
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
__props__.__dict__["use_msi"] = pulumi.Output.from_input(use_msi).apply(pulumi.runtime.to_json) if use_msi is not None else None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="pulumi:providers:azure-nextgen")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Provider, __self__).__init__(
'azure-native',
resource_name,
__props__,
opts)
| 53.049861
| 209
| 0.674586
| 2,336
| 19,151
| 5.244863
| 0.077055
| 0.082599
| 0.079987
| 0.080803
| 0.837741
| 0.778159
| 0.732697
| 0.68144
| 0.650343
| 0.618675
| 0
| 0.000068
| 0.236646
| 19,151
| 360
| 210
| 53.197222
| 0.838019
| 0.257428
| 0
| 0.417671
| 1
| 0
| 0.111364
| 0.041935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116466
| false
| 0.068273
| 0.02008
| 0
| 0.192771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
630801c27149171cd88e85dbf66f0d5a3556338c
| 47,201
|
py
|
Python
|
pirates/leveleditor/worldData/rambleshack_building_int_tavern.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/rambleshack_building_int_tavern.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/rambleshack_building_int_tavern.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.rambleshack_building_int_tavern
from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {'Objects': {'1121212983.08Shochet0': {'Type': 'Building Interior', 'Name': 'Tavern', 'Instanced': True, 'Objects': {'1154731709.64jubutler': {'Type': 'Townsperson', 'Category': 'Cast', 'AnimSet': 'tut_dan_idle', 'CustomModel': 'None', 'DNA': '1154731709.64jubutler', 'Hpr': VBase3(180.0, 0.0, 0.0), 'Patrol Radius': 12, 'Pos': Point3(1.5, 34.837, 1.082), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Idle', 'Team': 'Player'}, '1165268405.64kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.221, -1.513, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/table_bar_round'}}, '1165268489.64kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-43.0, -6.934, 1.022), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/table_bar_round'}}, '1165268495.0kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(20.657, 10.41, 0.973), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/table_bar_square'}}, '1165268541.81kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(20.727, 7.267, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/stool_bar'}}, '1165268554.8kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-40.012, 0.0, 0.0), 'Pos': Point3(20.82, 13.043, 0.991), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/props/stool_bar'}}, '1165268615.13kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(22.064, 23.617, 0.913), 'Scale': VBase3(0.618, 
0.618, 0.618), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel'}}, '1165268794.17kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(23.941, 21.496, 1.0), 'Scale': VBase3(0.575, 0.575, 0.575), 'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0), 'Model': 'models/props/barrel'}}, '1165269869.89kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(-1.309, 0.0, 0.0), 'Pos': Point3(7.739, 29.905, 12.524), 'Scale': VBase3(0.46, 0.46, 0.46), 'Visual': {'Color': (0.7490196228027344, 0.7137255072593689, 0.6000000238418579, 1.0), 'Model': 'models/props/barrel_worn'}}, '1165270073.72kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(-1.309, 0.0, 0.0), 'Pos': Point3(4.298, 30.046, 12.288), 'Scale': VBase3(0.975, 0.975, 0.975), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel_grey'}}, '1165270537.52kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(-1.622, 0.0, 0.0), 'Pos': Point3(0.788, 31.562, 12.225), 'Scale': VBase3(1.222, 1.222, 1.222), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/crate'}}, '1165270634.13kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-11.605, 30.761, 11.853), 'Scale': VBase3(1.705, 1.705, 1.705), 'Visual': {'Model': 'models/props/winebottle_A'}}, '1165270678.5kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-20.923, 40.318, 4.617), 'Scale': VBase3(1.705, 1.705, 1.705), 'Visual': {'Model': 'models/props/winebottle_A'}}, '1165270699.58kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-13.352, 30.733, 11.581), 'Scale': VBase3(1.558, 1.558, 1.558), 'Visual': {'Model': 'models/props/winebottle_B'}}, '1165270724.27kmuller': {'Type': 
'Crate', 'DisableCollision': False, 'Hpr': VBase3(-91.96, 1.684, 0.0), 'Pos': Point3(-17.093, 31.487, 11.741), 'Scale': VBase3(1.075, 1.075, 1.075), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/crate_04'}}, '1165270820.19kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(91.562, -5.922, 0.0), 'Pos': Point3(-23.124, 30.997, 11.497), 'Scale': VBase3(0.709, 0.709, 0.709), 'Visual': {'Color': (0.7490196228027344, 0.7137255072593689, 0.6000000238418579, 1.0), 'Model': 'models/props/barrel_sideways'}}, '1165270875.49kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(82.332, 1.271, -0.29), 'Pos': Point3(-28.726, 31.291, 11.032), 'Scale': VBase3(0.709, 0.709, 0.709), 'Visual': {'Color': (0.47058823704719543, 0.47058823704719543, 0.47058823704719543, 1.0), 'Model': 'models/props/barrel_sideways'}}, '1165270982.7kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(91.168, 0.0, 0.0), 'Pos': Point3(-7.199, 31.254, 12.192), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.7490196228027344, 0.7137255072593689, 0.6000000238418579, 1.0), 'Model': 'models/props/barrel_sideways'}}, '1165271086.06kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(35.02, 0.0, 0.0), 'Pos': Point3(27.348, 38.547, 1.0), 'Scale': VBase3(0.596, 0.596, 0.596), 'Visual': {'Color': (0.8980392217636108, 0.8039215803146362, 0.6941176652908325, 1.0), 'Model': 'models/props/barrel_group_2'}}, '1165271282.78kmuller': {'Type': 'Cups', 'Hpr': VBase3(-170.935, 0.0, 49.833), 'Pos': Point3(-13.189, 40.89, 7.015), 'Scale': VBase3(1.285, 1.285, 1.285), 'Visual': {'Color': (0.5899999737739563, 0.5299999713897705, 0.44999998807907104, 1.0), 'Model': 'models/props/beerstein'}}, '1165271328.08kmuller': {'Type': 'Cups', 'Hpr': VBase3(-170.935, 0.0, 33.665), 'Pos': Point3(-14.863, 40.059, 6.931), 'Scale': VBase3(1.285, 1.285, 1.285), 'Visual': {'Color': (0.4300000071525574, 
0.3499999940395355, 0.4099999964237213, 1.0), 'Model': 'models/props/beerstein'}}, '1165271359.02kmuller': {'Type': 'Cups', 'Hpr': VBase3(-170.935, 0.0, 38.68), 'Pos': Point3(-16.943, 40.519, 6.764), 'Scale': VBase3(1.285, 1.285, 1.285), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/beerstein'}}, '1165271411.02kmuller': {'Type': 'Cups', 'Hpr': VBase3(37.07, 0.0, 0.0), 'Pos': Point3(-24.08, 40.27, 7.526), 'Scale': VBase3(1.292, 1.292, 1.292), 'Visual': {'Model': 'models/props/beerstein'}}, '1165271436.39kmuller': {'Type': 'Cups', 'Hpr': VBase3(-110.262, 0.0, 0.0), 'Pos': Point3(-25.305, 40.258, 7.505), 'Scale': VBase3(1.236, 1.236, 1.236), 'Visual': {'Model': 'models/props/beerstein'}}, '1165271581.41kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-28.524, 32.551, 4.606), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/candle'}}, '1165271600.35kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-5.133, 32.631, 4.606), 'Scale': VBase3(0.99, 0.99, 0.99), 'Visual': {'Model': 'models/props/candle'}}, '1165271626.24kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(12.151, 33.135, 4.606), 'Scale': VBase3(0.782, 0.782, 0.782), 'Visual': {'Model': 'models/props/candle'}}, '1165271663.45kmuller': {'Type': 'Light_Fixtures', 'Hpr': VBase3(-99.016, 0.0, 0.0), 'Pos': Point3(21.305, 3.398, 8.673), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1165271702.97kmuller': {'Type': 'Light_Fixtures', 'Hpr': VBase3(-142.668, 0.0, 0.0), 'Pos': Point3(1.53, 2.184, 12.49), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chandelier_jail'}}, '1165271705.67kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-29.157, 14.32, 14.109), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chandelier_jail'}}, '1165272003.78kmuller': 
{'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, -4.587), 'Pos': Point3(-33.709, 32.15, 9.351), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165272035.33kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, -4.587), 'Pos': Point3(-31.803, 31.342, 9.952), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bottle_green'}}, '1165272160.13kmuller': {'Type': 'Food', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-23.667, 32.701, 6.389), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.6000000238418579, 1.0), 'Model': 'models/props/garlicString'}}, '1165272166.24kmuller': {'Type': 'Food', 'Hpr': VBase3(-37.81, 0.0, 0.0), 'Pos': Point3(-30.216, 35.507, 5.154), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/ham'}}, '1165272219.58kmuller': {'Type': 'Food', 'Hpr': VBase3(0.0, 0.0, 1.118), 'Pos': Point3(-28.72, 38.198, 5.84), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0), 'Model': 'models/props/sausage'}}, '1165272967.63kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(53.674, 0.0, 0.0), 'Pos': Point3(6.392, -4.588, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5490196347236633, 0.6235294342041016, 0.529411792755127, 1.0), 'Model': 'models/props/stool_bar'}}, '1165272984.3kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(97.835, 0.0, 0.0), 'Pos': Point3(-3.3, -5.347, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5490196347236633, 0.6235294342041016, 0.529411792755127, 1.0), 'Model': 'models/props/chair_bar'}}, '1165272995.24kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-12.615, 0.0, 0.0), 'Pos': Point3(4.575, 5.168, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5490196347236633, 
0.6235294342041016, 0.529411792755127, 1.0), 'Model': 'models/props/chair_bar'}}, '1165273011.24kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(1.985, -16.167, 4.148), 'Pos': Point3(-5.928, 8.639, 1.079), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5490196347236633, 0.6235294342041016, 0.529411792755127, 1.0), 'Model': 'models/props/chair_bar'}}, '1165273214.85kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(17.11, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(-1.894, 30.352, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/stool_bar_tall'}}, '1165273221.42kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(1.785, 0.0, 0.0), 'Pos': Point3(-6.689, 29.581, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6705882549285889, 0.6705882549285889, 0.6705882549285889, 1.0), 'Model': 'models/props/stool_bar_tall'}}, '1165273270.28kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(71.677, 0.0, 0.0), 'Pos': Point3(-32.493, 36.608, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6705882549285889, 0.6705882549285889, 0.6705882549285889, 1.0), 'Model': 'models/props/stool_bar_tall'}}, '1165273300.45kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(91.706, 0.0, 0.0), 'Pos': Point3(-51.886, 32.795, 1.026), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/table_bar_square'}}, '1165273348.77kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-53.676, 0.0, 0.0), 'Pos': Point3(-52.064, 28.341, 1.16), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/stool_bar'}}, '1165273398.75kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(87.614, 0.0, 0.0), 'Pos': Point3(-58.326, 17.612, 1.113), 'Scale': VBase3(0.763, 0.763, 0.763), 'Visual': {'Color': (0.8500000238418579, 0.8199999928474426, 0.7300000190734863, 1.0), 'Model': 'models/props/barrel_group_1'}}, 
'1165273535.11kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(-40.737, 0.0, 0.0), 'Objects': {'1165277930.75kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(39.838, 0.0, 0.0), 'Pos': Point3(-0.137, 0.304, 8.045), 'Scale': VBase3(0.859, 0.859, 0.859), 'Visual': {'Color': (0.6600000262260437, 0.5600000023841858, 0.550000011920929, 1.0), 'Model': 'models/props/bottle_red'}}}, 'Pos': Point3(-54.133, 20.916, 1.0), 'Scale': VBase3(1.164, 1.164, 1.164), 'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0), 'Model': 'models/props/crate'}}, '1165273600.94kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(-26.053, 0.0, 0.0), 'Pos': Point3(-53.978, 17.263, 1.062), 'Scale': VBase3(0.595, 0.595, 0.595), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel'}}, '1165273684.05kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(-18.987, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(-54.651, 13.18, 1.0), 'Scale': VBase3(0.781, 0.781, 0.781), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/props/crate_04'}}, '1165273741.35kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': VBase3(-37.441, 0.0, 0.0), 'Pos': Point3(-59.113, 0.024, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/prop_group_A'}}, '1165273820.41kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Objects': {'1165278536.22kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 55.65, 0.0), 'Pos': Point3(-1.553, 0.356, 8.999), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/pitcher_brown'}}}, 'Pos': Point3(-56.764, -1.815, 0.994), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': 
(1.0, 0.9599999785423279, 0.75, 1.0), 'Model': 'models/props/crates_group_2'}}, '1165273860.75kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(32.096, 0.0, 0.0), 'Pos': Point3(-60.644, -5.844, 1.242), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/props/barrel_worn'}}, '1165273910.55kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(180.0, -88.675, -180.0), 'Pos': Point3(-55.575, -8.4, 3.89), 'Scale': VBase3(1.388, 1.388, 1.388), 'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/crate_04'}}, '1165273998.72kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(155.16, 0.0, 0.0), 'Pos': Point3(-46.334, -11.342, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bar'}}, '1165274008.95kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(55.35, 0.0, 0.0), 'Pos': Point3(-45.887, -0.449, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/chair_bar'}}, '1165274017.97kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-56.822, 0.0, 0.0), 'Pos': Point3(-37.901, -4.589, 1.381), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/chair_bar'}}, '1165274050.94kmuller': {'Type': 'Light_Fixtures', 'Hpr': VBase3(87.561, 0.0, 0.0), 'Pos': Point3(-55.087, -12.502, 8.381), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1165274102.6kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-16.523, 0.0, 0.0), 'Pos': Point3(6.59, -21.577, 1.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bench'}}, '1165274126.86kmuller': {'Type': 'Light_Fixtures', 'Hpr': 
VBase3(0.0, 0.0, 0.001), 'Pos': Point3(22.139, 10.212, 3.921), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/candle'}}, '1165274146.16kmuller': {'Type': 'Cups', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-1.501, -2.374, 3.956), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/beerstein'}}, '1165274148.94kmuller': {'Type': 'Cups', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(20.694, 9.457, 3.893), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/beerstein'}}, '1165274166.8kmuller': {'Type': 'Cups', 'Hpr': VBase3(-95.251, -58.186, -96.172), 'Pos': Point3(-3.206, -1.517, 4.173), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/beerstein'}}, '1165274656.42kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Hpr': VBase3(-3.491, 3.38, 52.483), 'Pos': Point3(-55.052, -18.738, 2.824), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/props/Sack'}}, '1165274726.1kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Hpr': VBase3(-84.038, -50.399, 10.433), 'Pos': Point3(-55.816, -16.637, 1.658), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/Sack'}}, '1165274773.6kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Hpr': VBase3(-64.156, 0.0, 0.0), 'Pos': Point3(-50.775, -16.891, 0.961), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Sack'}}, '1165274848.8kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.185, 0.0), 'Pos': Point3(11.335, 43.701, 4.67), 'Scale': VBase3(0.727, 0.727, 0.727), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel_sideways'}}, '1165274894.11kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(16.891, 41.83, 4.629), 'Scale': VBase3(1.212, 1.212, 1.212), 'Visual': 
{'Model': 'models/props/candle'}}, '1165274931.35kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.185, 0.0), 'Pos': Point3(14.911, 43.357, 4.606), 'Scale': VBase3(0.893, 0.893, 0.893), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel_worn'}}, '1165274976.27kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.515, 42.559, 4.549), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.44999998807907104, 0.550000011920929, 0.44999998807907104, 1.0), 'Model': 'models/props/bottle_green'}}, '1165275023.81kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.185, 0.0), 'Pos': Point3(-0.651, 41.551, 4.633), 'Scale': VBase3(0.78, 0.78, 0.78), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/barrel_worn'}}, '1165275062.16kmuller': {'Type': 'Cups', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.998, 40.515, 4.61), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/beerstein'}}, '1165275084.2kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(3.879, 42.415, 4.597), 'Scale': VBase3(1.365, 1.365, 1.365), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/bottle_red'}}, '1165275123.75kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(4.667, 42.554, 4.606), 'Scale': VBase3(0.837, 0.837, 0.837), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.6000000238418579, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275138.58kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(5.112, 42.681, 4.606), 'Scale': VBase3(0.692, 0.692, 0.692), 'Visual': {'Color': (0.5899999737739563, 0.5299999713897705, 0.44999998807907104, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275160.58kmuller': {'Type': 'Jugs_and_Jars', 
'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.516, -0.636, 3.956), 'Scale': VBase3(0.708, 0.708, 0.708), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165275186.05kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(8.908, 41.951, 4.606), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165275192.22kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(-62.795, 0.0, 0.0), 'Pos': Point3(5.89, 41.712, 4.606), 'Scale': VBase3(1.78, 1.78, 1.78), 'Visual': {'Model': 'models/props/winebottle_B'}}, '1165275274.69kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(-23.802, 0.0, 0.0), 'Pos': Point3(7.435, 42.683, 4.606), 'Scale': VBase3(1.524, 1.524, 1.524), 'Visual': {'Model': 'models/props/waterpitcher'}}, '1165275305.1kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-5.128, 40.817, 4.572), 'Scale': VBase3(0.799, 0.799, 0.799), 'Visual': {'Model': 'models/props/crates_group_2'}}, '1165275369.2kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-7.292, 41.047, 4.663), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.4300000071525574, 0.3499999940395355, 0.3499999940395355, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165275380.81kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-7.739, 40.094, 4.581), 'Scale': VBase3(1.989, 1.989, 1.989), 'Visual': {'Model': 'models/props/waterpitcher'}}, '1165275418.02kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-8.999, 41.229, 4.606), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0), 'Model': 'models/props/largejug_B'}}, '1165275431.75kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 
'Pos': Point3(-9.109, 39.499, 4.616), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275460.86kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 3.523, 0.0), 'Pos': Point3(-9.92, 40.161, 4.53), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5600000023841858, 0.550000011920929, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275483.31kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-10.727, 40.776, 4.616), 'Scale': VBase3(1.471, 1.471, 1.471), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/bottle_red'}}, '1165275502.77kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-13.273, 40.377, 4.6), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.44999998807907104, 0.550000011920929, 0.44999998807907104, 1.0), 'Model': 'models/props/bottle_green'}}, '1165275545.97kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, -0.206, 0.0), 'Pos': Point3(-14.902, 39.847, 4.628), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5600000023841858, 0.550000011920929, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165275567.7kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.68, -0.159, 0.002), 'Pos': Point3(-15.969, 39.736, 4.629), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/bottle_brown'}}, '1165275597.6kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.68, -0.159, 0.002), 'Pos': Point3(-17.626, 40.536, 4.607), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0), 'Model': 'models/props/bottle_green'}}, '1165275616.55kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, 0.228), 'Pos': Point3(-17.152, 38.885, 4.62), 
'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/bottle_red'}}, '1165275673.27kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-19.82, 39.39, 4.614), 'Scale': VBase3(1.715, 1.715, 1.715), 'Visual': {'Model': 'models/props/jug'}}, '1165275716.6kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-18.878, 40.332, 4.55), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6600000262260437, 0.5400000214576721, 0.4699999988079071, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275743.11kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(178.747, 1.888, -56.421), 'Pos': Point3(-24.771, 39.987, 6.025), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.5699999928474426, 0.4699999988079071, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165275789.53kmuller': {'Type': 'Cups', 'Hpr': VBase3(-0.451, -2.801, 78.146), 'Pos': Point3(-21.936, 40.519, 6.909), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5098039507865906, 0.5098039507865906, 0.5098039507865906, 1.0), 'Model': 'models/props/cup_tin'}}, '1165275887.19kmuller': {'Type': 'Cups', 'Hpr': VBase3(-0.451, -2.801, 81.683), 'Pos': Point3(-23.15, 40.076, 6.846), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5098039507865906, 0.5098039507865906, 0.5098039507865906, 1.0), 'Model': 'models/props/cup_tin'}}, '1165275933.31kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-20.562, 40.941, 7.609), 'Scale': VBase3(0.629, 0.629, 0.629), 'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165275976.81kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-21.148, 40.093, 7.62), 'Scale': VBase3(0.629, 0.629, 0.629), 'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0), 'Model': 
'models/props/bottle_tan'}}, '1165276023.44kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-18.7, 40.422, 7.793), 'Scale': VBase3(0.747, 0.747, 0.747), 'Visual': {'Color': (0.6600000262260437, 0.5600000023841858, 0.550000011920929, 1.0), 'Model': 'models/props/bottle_red'}}, '1165276044.49kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(2.204, 0.0, 0.0), 'Pos': Point3(-18.007, 40.111, 7.806), 'Scale': VBase3(0.73, 0.73, 0.73), 'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165276973.66kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-55.364, 22.8, 10.403), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.44999998807907104, 0.550000011920929, 0.44999998807907104, 1.0), 'Model': 'models/props/bottle_green'}}, '1165277980.56kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(-3.423, 0.0, 0.0), 'Pos': Point3(-54.921, 20.908, 10.333), 'Scale': VBase3(0.532, 0.532, 0.532), 'Visual': {'Color': (0.75, 0.9300000071525574, 1.0, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165278031.19kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(-3.423, 0.0, 0.0), 'Pos': Point3(-55.41, 20.632, 10.324), 'Scale': VBase3(0.699, 0.699, 0.699), 'Visual': {'Color': (0.75, 0.9300000071525574, 1.0, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165278070.2kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(-109.923, 0.0, 0.0), 'Pos': Point3(-54.633, 19.913, 10.361), 'Scale': VBase3(1.343, 1.343, 1.343), 'Visual': {'Model': 'models/props/winebottle_B'}}, '1165278109.69kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(117.614, 0.0, 0.0), 'Pos': Point3(-55.479, 18.057, 10.334), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/waterpitcher'}}, '1165278149.66kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, -0.416), 'Pos': Point3(-55.645, 16.916, 10.334), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5199999809265137, 
0.5199999809265137, 0.5299999713897705, 1.0), 'Model': 'models/props/pitcher_brown'}}, '1165278170.14kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-54.855, 16.36, 10.174), 'Scale': VBase3(0.757, 0.757, 0.757), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165278214.22kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-55.864, 15.77, 10.166), 'Scale': VBase3(0.919, 0.919, 0.919), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/bottle_red'}}, '1165278287.33kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-56.297, 15.068, 10.175), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5899999737739563, 0.5299999713897705, 0.44999998807907104, 1.0), 'Model': 'models/props/bottle_green'}}, '1165278315.5kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-56.269, 13.525, 10.074), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/bottle_brown'}}, '1165278362.8kmuller': {'Type': 'Cups', 'Hpr': VBase3(-107.134, 0.0, 0.0), 'Pos': Point3(-56.66, 7.114, 9.941), 'Scale': VBase3(1.223, 1.223, 1.223), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/beerstein'}}, '1165278396.64kmuller': {'Type': 'Cups', 'Hpr': VBase3(144.653, 0.0, 0.0), 'Pos': Point3(-56.582, 6.327, 9.914), 'Scale': VBase3(1.223, 1.223, 1.223), 'Visual': {'Color': (0.6000000238418579, 0.7200000286102295, 0.6000000238418579, 1.0), 'Model': 'models/props/beerstein'}}, '1165278441.47kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(171.096, 0.0, 0.0), 'Pos': Point3(-56.351, 4.181, 9.869), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/bottle_tan'}}, '1165278486.0kmuller': {'Type': 'Jugs_and_Jars', 
'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-56.412, 3.154, 9.819), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/bottle_red'}}, '1165278512.41kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-58.689, 1.149, 9.701), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.7099999785423279, 0.6700000166893005, 0.6000000238418579, 1.0), 'Model': 'models/props/bottle_brown'}}, '1165278611.91kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-58.317, -3.899, 9.722), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/bottle_green'}}, '1165278631.7kmuller': {'Type': 'Jugs_and_Jars', 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-58.459, -5.388, 9.658), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/bottle_green'}}, '1166125378.51kmuller': {'Type': 'Light_Fixtures', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.899, -1.326, 3.956), 'Scale': VBase3(1.271, 1.271, 1.271), 'Visual': {'Model': 'models/props/lamp_table_hurricane_oil'}}, '1166125484.09kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(32.096, 0.0, 0.0), 'Pos': Point3(-54.06, 0.32, 0.975), 'Scale': VBase3(0.773, 0.773, 0.773), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/barrel_grey'}}, '1166126336.53kmuller': {'Type': 'Food', 'Hpr': VBase3(0.0, 0.0, 1.118), 'Pos': Point3(-7.079, 32.046, 7.804), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.6000000238418579, 1.0), 'Model': 'models/props/sausage'}}, '1171238953.92MAsaduzz': {'Type': 'Townsperson', 'Category': 'Cast', 'AnimSet': 'tut_dan_idle', 'CustomModel': 'None', 'Hpr': VBase3(180.0, 0.0, 0.0), 'Patrol Radius': 12, 'Pos': Point3(1.5, 34.837, 
1.082), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Idle', 'Team': 'Player'}, '1174958597.14dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '38.8636', 'DropOff': '4.0909', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(-7.125, -17.967, -2.814), 'Intensity': '1.0000', 'LightType': 'SPOT', 'Pos': Point3(6.978, 18.883, 10.024), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/light_tool_bulb'}}, '1174958743.41dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Intensity': '0.5152', 'LightType': 'AMBIENT', 'Pos': Point3(-19.633, 9.492, 3.636), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1174961658.64dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(7.496, -81.833, -71.16), 'Intensity': '0.0758', 'LightType': 'POINT', 'Pos': Point3(-29.125, 14.291, 12.777), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1174963688.81dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(-83.067, -8.288, -12.746), 'Intensity': '0.3485', 'LightType': 'SPOT', 'Pos': Point3(-7.027, 25.879, 8.883), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/light_tool_bulb'}}, '1200528384.0jubutler1': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(78.799, 0.0, 0.0), 'Pos': Point3(19.911, -9.53, 0.487), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1200528384.0jubutler2': {'Type': 'Door Locator Node', 'Name': 'door_locator_2', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(-42.937, 40.889, 
0.736), 'Scale': VBase3(1.0, 1.0, 1.0), 'TargetUIDs': ['1190757402.45joswilso']}}, 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/buildings/interior_tavern_c'}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1121212983.08Shochet0': '["Objects"]["1121212983.08Shochet0"]', '1154731709.64jubutler': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1154731709.64jubutler"]', '1165268405.64kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268405.64kmuller"]', '1165268489.64kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268489.64kmuller"]', '1165268495.0kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268495.0kmuller"]', '1165268541.81kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268541.81kmuller"]', '1165268554.8kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268554.8kmuller"]', '1165268615.13kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268615.13kmuller"]', '1165268794.17kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165268794.17kmuller"]', '1165269869.89kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165269869.89kmuller"]', '1165270073.72kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270073.72kmuller"]', '1165270537.52kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270537.52kmuller"]', '1165270634.13kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270634.13kmuller"]', '1165270678.5kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270678.5kmuller"]', '1165270699.58kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270699.58kmuller"]', '1165270724.27kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270724.27kmuller"]', '1165270820.19kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270820.19kmuller"]', '1165270875.49kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270875.49kmuller"]', '1165270982.7kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165270982.7kmuller"]', '1165271086.06kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271086.06kmuller"]', '1165271282.78kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271282.78kmuller"]', '1165271328.08kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271328.08kmuller"]', '1165271359.02kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271359.02kmuller"]', '1165271411.02kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271411.02kmuller"]', '1165271436.39kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271436.39kmuller"]', '1165271581.41kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271581.41kmuller"]', '1165271600.35kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271600.35kmuller"]', '1165271626.24kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271626.24kmuller"]', '1165271663.45kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271663.45kmuller"]', '1165271702.97kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271702.97kmuller"]', '1165271705.67kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165271705.67kmuller"]', '1165272003.78kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272003.78kmuller"]', '1165272035.33kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272035.33kmuller"]', '1165272160.13kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272160.13kmuller"]', '1165272166.24kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272166.24kmuller"]', '1165272219.58kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272219.58kmuller"]', '1165272967.63kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272967.63kmuller"]', '1165272984.3kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272984.3kmuller"]', '1165272995.24kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165272995.24kmuller"]', '1165273011.24kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273011.24kmuller"]', '1165273214.85kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273214.85kmuller"]', '1165273221.42kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273221.42kmuller"]', '1165273270.28kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273270.28kmuller"]', '1165273300.45kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273300.45kmuller"]', '1165273348.77kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273348.77kmuller"]', '1165273398.75kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273398.75kmuller"]', '1165273535.11kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273535.11kmuller"]', '1165273600.94kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273600.94kmuller"]', '1165273684.05kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273684.05kmuller"]', '1165273741.35kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273741.35kmuller"]', '1165273820.41kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273820.41kmuller"]', '1165273860.75kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273860.75kmuller"]', '1165273910.55kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273910.55kmuller"]', '1165273998.72kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273998.72kmuller"]', '1165274008.95kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274008.95kmuller"]', '1165274017.97kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274017.97kmuller"]', '1165274050.94kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274050.94kmuller"]', '1165274102.6kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274102.6kmuller"]', '1165274126.86kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274126.86kmuller"]', '1165274146.16kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274146.16kmuller"]', '1165274148.94kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274148.94kmuller"]', '1165274166.8kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274166.8kmuller"]', '1165274656.42kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274656.42kmuller"]', '1165274726.1kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274726.1kmuller"]', '1165274773.6kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274773.6kmuller"]', '1165274848.8kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274848.8kmuller"]', '1165274894.11kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274894.11kmuller"]', '1165274931.35kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274931.35kmuller"]', '1165274976.27kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165274976.27kmuller"]', '1165275023.81kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275023.81kmuller"]', '1165275062.16kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275062.16kmuller"]', '1165275084.2kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275084.2kmuller"]', '1165275123.75kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275123.75kmuller"]', '1165275138.58kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275138.58kmuller"]', '1165275160.58kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275160.58kmuller"]', '1165275186.05kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275186.05kmuller"]', '1165275192.22kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275192.22kmuller"]', '1165275274.69kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275274.69kmuller"]', '1165275305.1kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275305.1kmuller"]', '1165275369.2kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275369.2kmuller"]', '1165275380.81kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275380.81kmuller"]', '1165275418.02kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275418.02kmuller"]', '1165275431.75kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275431.75kmuller"]', '1165275460.86kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275460.86kmuller"]', '1165275483.31kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275483.31kmuller"]', '1165275502.77kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275502.77kmuller"]', '1165275545.97kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275545.97kmuller"]', '1165275567.7kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275567.7kmuller"]', '1165275597.6kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275597.6kmuller"]', '1165275616.55kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275616.55kmuller"]', '1165275673.27kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275673.27kmuller"]', '1165275716.6kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275716.6kmuller"]', '1165275743.11kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275743.11kmuller"]', '1165275789.53kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275789.53kmuller"]', '1165275887.19kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275887.19kmuller"]', '1165275933.31kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275933.31kmuller"]', '1165275976.81kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165275976.81kmuller"]', '1165276023.44kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165276023.44kmuller"]', '1165276044.49kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165276044.49kmuller"]', '1165276973.66kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165276973.66kmuller"]', '1165277930.75kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273535.11kmuller"]["Objects"]["1165277930.75kmuller"]', '1165277980.56kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165277980.56kmuller"]', '1165278031.19kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278031.19kmuller"]', '1165278070.2kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278070.2kmuller"]', '1165278109.69kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278109.69kmuller"]', '1165278149.66kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278149.66kmuller"]', '1165278170.14kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278170.14kmuller"]', '1165278214.22kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278214.22kmuller"]', '1165278287.33kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278287.33kmuller"]', '1165278315.5kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278315.5kmuller"]', '1165278362.8kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278362.8kmuller"]', '1165278396.64kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278396.64kmuller"]', '1165278441.47kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278441.47kmuller"]', '1165278486.0kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278486.0kmuller"]', '1165278512.41kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278512.41kmuller"]', '1165278536.22kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165273820.41kmuller"]["Objects"]["1165278536.22kmuller"]', '1165278611.91kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278611.91kmuller"]', '1165278631.7kmuller': 
'["Objects"]["1121212983.08Shochet0"]["Objects"]["1165278631.7kmuller"]', '1166125378.51kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1166125378.51kmuller"]', '1166125484.09kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1166125484.09kmuller"]', '1166126336.53kmuller': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1166126336.53kmuller"]', '1171238953.92MAsaduzz': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1171238953.92MAsaduzz"]', '1174958597.14dzlu': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1174958597.14dzlu"]', '1174958743.41dzlu': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1174958743.41dzlu"]', '1174961658.64dzlu': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1174961658.64dzlu"]', '1174963688.81dzlu': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1174963688.81dzlu"]', '1200528384.0jubutler1': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1200528384.0jubutler1"]', '1200528384.0jubutler2': '["Objects"]["1121212983.08Shochet0"]["Objects"]["1200528384.0jubutler2"]'}}
extraInfo = {'camPos': Point3(-85.9331, 83.2949, 33.1737), 'camHpr': VBase3(-141, -21, 0), 'focalLength': 1.39999997616}
| 6,743
| 46,781
| 0.667931
| 6,501
| 47,201
| 4.814644
| 0.128134
| 0.027476
| 0.029712
| 0.02607
| 0.658466
| 0.498243
| 0.442939
| 0.395176
| 0.384888
| 0.383802
| 0
| 0.336548
| 0.068579
| 47,201
| 7
| 46,782
| 6,743
| 0.375398
| 0.005021
| 0
| 0
| 0
| 0
| 0.501661
| 0.253556
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6308cdba07d669eed5899f6447b37eccfe0c63ca
| 154
|
py
|
Python
|
randomwordgenerator/__init__.py
|
Npascetti/RandomWordGenerator
|
d32f085d8511d9f67c057e049a5a8b1c17361800
|
[
"MIT"
] | 3
|
2016-11-09T19:59:41.000Z
|
2019-11-26T03:07:36.000Z
|
randomwordgenerator/__init__.py
|
Npascetti/RandomWordGenerator
|
d32f085d8511d9f67c057e049a5a8b1c17361800
|
[
"MIT"
] | 3
|
2018-09-29T21:13:21.000Z
|
2021-01-12T02:02:43.000Z
|
randomwordgenerator/__init__.py
|
Npascetti/RandomWordGenerator
|
d32f085d8511d9f67c057e049a5a8b1c17361800
|
[
"MIT"
] | 2
|
2018-08-06T21:49:56.000Z
|
2019-10-26T13:29:00.000Z
|
import os
import sys

# Absolute, symlink-resolved path of the directory containing this file.
_THIS_DIR_NAME = os.path.dirname(__file__)
_THIS_DIR_ABS_PATH = os.path.realpath(_THIS_DIR_NAME)

# Make sibling modules importable regardless of the interpreter's CWD.
# Guard against appending a duplicate entry when the path is already present.
if _THIS_DIR_ABS_PATH not in sys.path:
    sys.path.append(_THIS_DIR_ABS_PATH)
| 25.666667
| 53
| 0.837662
| 28
| 154
| 3.964286
| 0.428571
| 0.252252
| 0.198198
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 154
| 6
| 54
| 25.666667
| 0.776224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6314c1819e66d3661d9284a7c6efe0ab322baa43
| 1,906
|
py
|
Python
|
sahl/util/git.py
|
karimpedia/sahl
|
6ad89618e63789e3d1cb8512084d616a81a6f74f
|
[
"MIT"
] | null | null | null |
sahl/util/git.py
|
karimpedia/sahl
|
6ad89618e63789e3d1cb8512084d616a81a6f74f
|
[
"MIT"
] | null | null | null |
sahl/util/git.py
|
karimpedia/sahl
|
6ad89618e63789e3d1cb8512084d616a81a6f74f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 16:10:19 2017
@author: ks
"""
# ======================================================================================================================
# ----------------------------------------------------------------------------------------------------- Priority Imports
# -------------------------------------------------------------------------------------------------- Python Lib. Imports
import subprocess
# ----------------------------------------------------------------------------------------------- 3rd Party Lib. Imports
# ---------------------------------------------------------------------------------------------------- Developer Imports
# ------------------------------------------------------------------------------------------------- This package Imports
# ---------------------------------------------------------------------------------------------- This experiment Imports
# ----------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
# ======================================================================================================================
# ----------------------------------------------------------------------------------------------------------------------
def get_git_revision_hash():
    """Return the full SHA-1 of HEAD as raw ``git rev-parse`` output (bytes, trailing newline)."""
    rev_parse_cmd = ['git', 'rev-parse', 'HEAD']
    return subprocess.check_output(rev_parse_cmd)
def get_git_revision_short_hash():
    """Return the abbreviated SHA-1 of HEAD as raw ``git rev-parse --short`` output (bytes, trailing newline)."""
    rev_parse_cmd = ['git', 'rev-parse', '--short', 'HEAD']
    return subprocess.check_output(rev_parse_cmd)
# ----------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
| 59.5625
| 120
| 0.171039
| 65
| 1,906
| 4.876923
| 0.646154
| 0.063091
| 0.056782
| 0.107256
| 0.264984
| 0.264984
| 0.264984
| 0.264984
| 0
| 0
| 0
| 0.008264
| 0.047744
| 1,906
| 31
| 121
| 61.483871
| 0.166391
| 0.859916
| 0
| 0
| 0
| 0
| 0.159184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2d6d0fc5cf3bca48306b93858536308fa9a083cc
| 69
|
py
|
Python
|
lib/lib/utils/__init__.py
|
trouleau/noisy-hawkes-cumulants
|
a183a766807a714ca4338f09249d4ddc4e9a11a7
|
[
"MIT"
] | 1
|
2021-07-22T05:16:13.000Z
|
2021-07-22T05:16:13.000Z
|
lib/lib/utils/__init__.py
|
trouleau/noisy-hawkes-cumulants
|
a183a766807a714ca4338f09249d4ddc4e9a11a7
|
[
"MIT"
] | null | null | null |
lib/lib/utils/__init__.py
|
trouleau/noisy-hawkes-cumulants
|
a183a766807a714ca4338f09249d4ddc4e9a11a7
|
[
"MIT"
] | null | null | null |
from . import metrics
from . import cumulants
from . import plotting
| 17.25
| 23
| 0.782609
| 9
| 69
| 6
| 0.555556
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 69
| 3
| 24
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2deb94a57917065f72be1958f715f8694beddee7
| 22
|
py
|
Python
|
Modules/vms/dscdef/dscdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | 2
|
2021-10-06T15:46:53.000Z
|
2022-01-26T02:58:54.000Z
|
Modules/vms/dscdef/dscdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
Modules/vms/dscdef/dscdef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
from _dscdef import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
93152e94b0c4ab82605dd22422044271e8fceb44
| 27
|
py
|
Python
|
dpipe/medim/hsv.py
|
samokhinv/deep_pipe
|
9461b02f5f32c3e9f24490619ebccf417979cffc
|
[
"MIT"
] | null | null | null |
dpipe/medim/hsv.py
|
samokhinv/deep_pipe
|
9461b02f5f32c3e9f24490619ebccf417979cffc
|
[
"MIT"
] | null | null | null |
dpipe/medim/hsv.py
|
samokhinv/deep_pipe
|
9461b02f5f32c3e9f24490619ebccf417979cffc
|
[
"MIT"
] | null | null | null |
from dpipe.im.hsv import *
| 13.5
| 26
| 0.740741
| 5
| 27
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9316f43f6465c00ad6c141120a0b36299d51ae03
| 11,998
|
py
|
Python
|
model.py
|
tushartushar/designite_util
|
1443b401d336acdcaf4e3d9a5ea4a41be85c825d
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
tushartushar/designite_util
|
1443b401d336acdcaf4e3d9a5ea4a41be85c825d
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
tushartushar/designite_util
|
1443b401d336acdcaf4e3d9a5ea4a41be85c825d
|
[
"Apache-2.0"
] | null | null | null |
import re
from constants import *
class Type:
    """Source-location record for a type (class/interface) found by the analyzer."""

    def __init__(self, project_name, package_name, type_name, file_path, start_line_no):
        # Keep the full coordinates needed to trace the type back to its file.
        (self.project_name,
         self.package_name,
         self.type_name,
         self.file_path,
         self.start_line_no) = (project_name, package_name, type_name,
                                file_path, start_line_no)
class Method:
    """Source-location record for a method found by the analyzer."""

    def __init__(self, project_name, package_name, type_name, method_name, start_line_no):
        # Keep the full coordinates needed to trace the method back to its type.
        (self.project_name,
         self.package_name,
         self.type_name,
         self.method_name,
         self.start_line_no) = (project_name, package_name, type_name,
                                method_name, start_line_no)
class ImplSmell:
    """An implementation-level smell reported for a method.

    Tracks matching against another snapshot's smell list (``matched``) and
    the before/after value of the smell's driving metric
    (``before_metric`` / ``after_metric`` / ``change_in_metric``).
    """

    def __init__(self, project_name, package_name, type_name, method_name, smell_name, cause, line_no):
        self.project_name = project_name.strip('\n')
        self.package_name = package_name
        self.type_name = type_name
        self.method_name = method_name
        self.smell_name = smell_name
        self.cause = cause.strip('\n')
        self.m_start_line_no = line_no.strip('\n')
        # Bookkeeping filled in later by is_smell_present / populate_diff_metrics.
        self.matched = False
        self.before_metric = 0
        self.after_metric = 0
        self.change_in_metric = 0

    def __str__(self):
        return self.project_name + ', ' + self.package_name + ', ' + self.type_name + ', ' + self.method_name + ', ' + self.smell_name + ', ' + self.cause + ', ' + self.m_start_line_no.strip(
            '\n') + ', ' + str(
            self.before_metric) + ', ' + str(self.after_metric) + ', ' + str(self.change_in_metric)

    def is_smell_present(self, smell_list):
        """Return ``(True, item)`` for the first unmatched equivalent smell in
        *smell_list*, marking it matched; ``(False, None)`` if none is found."""
        filtered_list = filter(lambda item:
                               item.smell_name == self.smell_name and
                               item.project_name == self.project_name and
                               item.package_name == self.package_name and
                               item.type_name == self.type_name and
                               item.method_name == self.method_name,
                               smell_list)
        for item in filtered_list:
            if not item.matched:
                item.matched = True
                return True, item
        return False, None

    def populate_diff_metrics(self, similar_smell):
        """Compare this smell's cause text with *similar_smell*'s and fill the
        before/after/change metric fields."""
        if self.smell_name == I_COMP_COND:
            # Condition complexity = number of boolean connectors in the statement.
            stmt1 = self.cause.replace('is complex.', '').strip()
            stmt2 = similar_smell.cause.replace('is complex.', '').strip()
            self.before_metric = stmt2.count('&&') + stmt2.count('||')
            self.after_metric = stmt1.count('&&') + stmt1.count('||')
            self.change_in_metric = self.after_metric - self.before_metric
        elif self.smell_name in (I_COMP_MTD, I_LONG_ID, I_LONG_STMT):
            # These causes read "... is <N> ..."; diff the single number.
            self._populate_numeric_diff(r'is (\d+)', similar_smell)
        elif self.smell_name in (I_LONG_PARAM_LIST, I_LONG_MTD):
            # These causes read "... has <N> ...".
            self._populate_numeric_diff(r'has (\d+)', similar_smell)
        elif self.smell_name == I_MAGIC_NO:
            # These causes read "...: <N>".
            self._populate_numeric_diff(r': (\d+)', similar_smell)

    def _populate_numeric_diff(self, pattern, similar_smell):
        """Extract one integer from each cause via *pattern* and record
        before/after/change; leaves the fields untouched if either cause lacks it."""
        m1 = re.search(pattern, self.cause)
        m2 = re.search(pattern, similar_smell.cause)
        if m1 and m2:
            no1 = int(m1.group(1))
            no2 = int(m2.group(1))
            self.before_metric = no2
            self.after_metric = no1
            self.change_in_metric = no1 - no2
class DesignSmell:
    """A design smell reported for a type.

    Tracks matching against another snapshot's smell list (``matched``) and
    before/after metrics derived from the cause text.
    """

    def __init__(self, project_name, package_name, type_name, smell_name, cause):
        self.project_name = project_name.strip('\n')
        self.package_name = package_name
        self.type_name = type_name
        self.smell_name = smell_name.strip()
        self.cause = cause.strip('\n')
        # Bookkeeping filled in later by is_smell_present / populate_diff_metrics.
        self.matched = False
        self.before_metric = 0
        self.after_metric = 0
        self.change_in_metric = 0

    def __str__(self):
        return self.project_name + ', ' + self.package_name + ', ' + self.type_name + ', ' + self.smell_name + ', ' + self.cause + ', ' + str(
            self.before_metric) + ', ' + str(self.after_metric) + ', ' + str(self.change_in_metric)

    def is_smell_present(self, smell_list):
        """Return ``(True, item)`` for the first unmatched equivalent smell in
        *smell_list*, marking it matched; ``(False, None)`` if none is found."""
        filtered_list = filter(lambda item:
                               item.smell_name == self.smell_name and
                               item.project_name == self.project_name and
                               item.package_name == self.package_name and
                               item.type_name == self.type_name,
                               smell_list)
        for item in filtered_list:
            if not item.matched:
                item.matched = True
                return True, item
        return False, None

    def populate_diff_metrics(self, similar_smell):
        """Compare this smell's cause text with *similar_smell*'s and fill the
        before/after/change metric fields."""
        common_str = common_substring_from_start(self.cause, similar_smell.cause)
        cause1 = self.cause.replace(common_str, '').rstrip('.')
        cause2 = similar_smell.cause.replace(common_str, '').rstrip('.')
        if self.smell_name in (D_BRO_MOD, D_BRO_HIE, D_CYC_MOD, D_CYC_HIE,
                               D_DEF_ENC, D_FEA_ENV, D_UNN_ABS, D_IMP_ABS,
                               D_MUL_HIE, D_REB_HIE, D_WID_HIE):
            # These smells list classes separated by ';' — count how many differ.
            self._set_diff_metrics(self._split_set(cause1), self._split_set(cause2))
        elif self.smell_name in (D_UXP_ENC, D_MIS_HIE):
            # The class list sits before an 'in method' marker in the cause.
            types1 = cause1.rpartition('in method')[0]
            types2 = cause2.rpartition('in method')[0]
            self._set_diff_metrics(self._split_set(types1), self._split_set(types2))
        elif self.smell_name == D_HUB_MOD:
            # Hub-like modularization lists incoming and outgoing dependencies.
            # BUGFIX: the old code built the second outgoing set from cause1,
            # comparing the new outgoing dependencies against themselves.
            self._set_diff_metrics(self._hub_dep_set(cause1), self._hub_dep_set(cause2))
        elif self.smell_name in (D_INS_MOD, D_DEE_HIE):
            # These smells differ in a single (possibly fractional) number.
            m1 = re.search(r'\s*(\d+\.?\d*)', cause1)
            m2 = re.search(r'\s*(\d+\.?\d*)', cause2)
            if m1 and m2:
                no1 = float(m1.group(1))
                no2 = float(m2.group(1))
                self.before_metric = no2
                self.after_metric = no1
                self.change_in_metric = round(no1 - no2, 2)

    @staticmethod
    def _split_set(text):
        """Split a ';'-separated list into a set of stripped entries."""
        return set(x.strip() for x in text.split(';'))

    def _set_diff_metrics(self, cause1_set, cause2_set):
        """Record set sizes as after/before and the symmetric difference size as change."""
        diff1 = cause1_set.difference(cause2_set)
        diff2 = cause2_set.difference(cause1_set)
        self.before_metric = len(cause2_set)
        self.after_metric = len(cause1_set)
        self.change_in_metric = len(diff1) + len(diff2)

    @staticmethod
    def _hub_dep_set(cause):
        """Union of the incoming and outgoing dependency sets parsed from a
        Hub-like-Modularization cause string."""
        head = cause.rpartition('Outgoing dependencies:')[0]
        incoming = head.rpartition('Incoming dependencies:')[2].rstrip('.')
        outgoing = cause.rpartition('Outgoing dependencies:')[2]
        return DesignSmell._split_set(incoming) | DesignSmell._split_set(outgoing)
class ArchSmell:
    """An architecture smell reported at package level.

    Tracks matching against another snapshot's smell list (``matched``) and
    before/after metrics derived from the cause text.
    """

    def __init__(self, project_name, package_name, smell_name, cause):
        self.project_name = project_name.strip('\n')
        self.package_name = package_name
        self.smell_name = smell_name
        self.cause = cause.strip('\n')
        # Bookkeeping filled in later by is_smell_present / populate_diff_metrics.
        self.matched = False
        self.before_metric = 0
        self.after_metric = 0
        self.change_in_metric = 0

    def __str__(self):
        fields = [self.project_name, self.package_name, self.smell_name, self.cause,
                  str(self.before_metric), str(self.after_metric), str(self.change_in_metric)]
        return ', '.join(fields)

    def is_smell_present(self, smell_list):
        """Return ``(True, item)`` for the first unmatched equivalent smell in
        *smell_list*, marking it matched; ``(False, None)`` if none is found."""
        for candidate in smell_list:
            equivalent = (candidate.smell_name == self.smell_name
                          and candidate.project_name == self.project_name
                          and candidate.package_name == self.package_name)
            if equivalent and not candidate.matched:
                candidate.matched = True
                return True, candidate
        return False, None

    def populate_diff_metrics(self, similar_smell):
        """Compare this smell's cause text with *similar_smell*'s and fill the
        before/after/change metric fields."""
        shared_prefix = common_substring_from_start(self.cause, similar_smell.cause)
        new_cause = self.cause.replace(shared_prefix, '').rstrip('.')
        old_cause = similar_smell.cause.replace(shared_prefix, '').rstrip('.')
        new_set = {entry.strip() for entry in new_cause.split(';')}
        old_set = {entry.strip() for entry in old_cause.split(';')}
        only_new = new_set.difference(old_set)
        only_old = old_set.difference(new_set)
        if self.smell_name in (A_CYC_DEP, A_UNS_DEP, A_SCA_FUN, A_AMB_INT):
            # These smells list classes; the change is the number of differing entries.
            self.before_metric = len(old_set)
            self.after_metric = len(new_set)
            self.change_in_metric = len(only_new) + len(only_old)
        else:
            # Remaining smells differ in a single (possibly fractional) number.
            match_new = re.search(r'[:=]\s*(\d+\.?\d*)', self.cause)
            match_old = re.search(r'[:=]\s*(\d+\.?\d*)', similar_smell.cause)
            if match_new and match_old:
                after = float(match_new.group(1))
                before = float(match_old.group(1))
                self.before_metric = before
                self.after_metric = after
                self.change_in_metric = round(after - before, 2)
def common_substring_from_start(str_a, str_b):
    """Return the longest common prefix of *str_a* and *str_b*, stopping just
    after the first ':' encountered within the common prefix."""
    prefix_chars = []
    for ch_a, ch_b in zip(str_a, str_b):
        if ch_a != ch_b:
            break
        prefix_chars.append(ch_a)
        # A colon ends the interesting prefix (the cause label), colon included.
        if ch_a == ':':
            break
    return ''.join(prefix_chars)
| 47.422925
| 360
| 0.580513
| 1,560
| 11,998
| 4.201282
| 0.104487
| 0.061794
| 0.071407
| 0.041196
| 0.864968
| 0.824382
| 0.757705
| 0.731767
| 0.706744
| 0.687672
| 0
| 0.022107
| 0.310052
| 11,998
| 252
| 361
| 47.611111
| 0.76963
| 0.06101
| 0
| 0.660377
| 0
| 0
| 0.032892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.009434
| 0.014151
| 0.160377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
931d81e8dd2d58de4aba1123beed746c10fc06cc
| 19,505
|
py
|
Python
|
tests/executor/test_serial.py
|
adir-intsights/sergeant
|
76229b045309a3d795ac760d9f08da04b5e0a750
|
[
"MIT"
] | null | null | null |
tests/executor/test_serial.py
|
adir-intsights/sergeant
|
76229b045309a3d795ac760d9f08da04b5e0a750
|
[
"MIT"
] | null | null | null |
tests/executor/test_serial.py
|
adir-intsights/sergeant
|
76229b045309a3d795ac760d9f08da04b5e0a750
|
[
"MIT"
] | null | null | null |
import unittest
import unittest.mock
import time
import sergeant.worker
import sergeant.executor
import sergeant.config
class SerialTestCase(
unittest.TestCase,
):
def setUp(self):
    """Build a fully mocked worker with an empty connector configuration."""
    self.worker = unittest.mock.MagicMock()
    self.worker.config = sergeant.config.WorkerConfig(
        name='test_worker',
        connector=sergeant.config.Connector(
            type='',
            params={},
        ),
    )
    self.worker.work = unittest.mock.MagicMock(return_value=True)
    # Each lifecycle hook gets its own fresh mock so call counts stay isolated.
    for hook_name in (
        'pre_work',
        'post_work',
        'handle_success',
        'handle_timeout',
        'handle_failure',
        'handle_retry',
        'handle_max_retries',
        '_requeue',
    ):
        setattr(self.worker, hook_name, unittest.mock.MagicMock())
    self.exception = Exception('some exception')
def test_pre_work(
self,
):
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
serial_executor.killer = unittest.mock.MagicMock()
task = sergeant.objects.Task()
self.assertFalse(
expr=serial_executor.currently_working,
)
serial_executor.pre_work(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.logger.error.assert_not_called()
self.assertTrue(
expr=serial_executor.currently_working,
)
serial_executor.killer.start.assert_not_called()
serial_executor.should_use_a_killer = True
serial_executor.pre_work(
task=task,
)
serial_executor.killer.start.assert_called_once()
self.worker.pre_work.side_effect = Exception('exception message')
serial_executor.pre_work(
task=task,
)
self.worker.logger.error.assert_called_once_with(
msg='pre_work has failed: exception message',
extra={
'task': task,
},
)
def test_post_work(
self,
):
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
serial_executor.killer = unittest.mock.MagicMock()
task = sergeant.objects.Task()
serial_executor.currently_working = True
self.assertTrue(
expr=serial_executor.currently_working,
)
serial_executor.post_work(
task=task,
success=True,
exception=None,
)
self.worker.post_work.assert_called_once_with(
task=task,
success=True,
exception=None,
)
self.worker.logger.error.assert_not_called()
self.assertFalse(
expr=serial_executor.currently_working,
)
serial_executor.killer.stop_and_reset.assert_not_called()
serial_executor.should_use_a_killer = True
serial_executor.post_work(
task=task,
success=True,
exception=None,
)
serial_executor.killer.stop_and_reset.assert_called_once()
exception = Exception('exception message')
self.worker.post_work.side_effect = exception
serial_executor.post_work(
task=task,
success=True,
exception=None,
)
self.worker.logger.error.assert_called_once_with(
msg='post_work has failed: exception message',
extra={
'task': task,
'success': True,
'exception': exception,
},
)
def test_success(
self,
):
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once_with(
task=task,
success=True,
exception=None,
)
self.worker.handle_success.assert_called_once_with(
task=task,
returned_value=True,
)
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNone(
obj=getattr(serial_executor, 'killer', None),
)
def test_failure(
self,
):
def raise_exception_work_method(
task,
):
raise self.exception
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: raise_exception_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once_with(
task=task,
success=False,
exception=self.exception,
)
self.worker.handle_failure.assert_called_once()
self.assertEqual(
first=self.worker.handle_failure.call_args[1]['task'],
second=task,
)
self.assertIsInstance(
obj=self.worker.handle_failure.call_args[1]['exception'],
cls=Exception,
)
self.assertEqual(
first=self.worker.handle_failure.call_args[1]['exception'],
second=self.exception,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNone(
obj=getattr(serial_executor, 'killer', None),
)
def test_soft_timeout(
self,
):
def timeout_work_method(
task,
):
while True:
time.sleep(0.1)
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: timeout_work_method(task),
)
self.worker.config = self.worker.config.replace(
timeouts=sergeant.config.Timeouts(
soft_timeout=0.3,
),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerSoftTimedout,
)
self.worker.handle_timeout.assert_called_once_with(
task=task,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNotNone(
obj=serial_executor.killer,
)
def test_hard_timeout(
self,
):
def timeout_work_method(
task,
):
while True:
time.sleep(0.1)
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: timeout_work_method(task),
)
self.worker.config = self.worker.config.replace(
timeouts=sergeant.config.Timeouts(
hard_timeout=0.3,
),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerHardTimedout,
)
self.worker.handle_timeout.assert_called_once_with(
task=task,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNotNone(
obj=serial_executor.killer,
)
def test_multiple_timeout(
self,
):
def timeout_work_method(
task,
):
while True:
time.sleep(0.1)
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: timeout_work_method(task),
)
self.worker.config = self.worker.config.replace(
timeouts=sergeant.config.Timeouts(
soft_timeout=0.3,
),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task] * 2,
)
self.assertEqual(
first=self.worker.work.call_count,
second=2,
)
self.assertEqual(
first=self.worker.pre_work.call_count,
second=2,
)
self.assertEqual(
first=self.worker.post_work.call_count,
second=2,
)
self.assertEqual(
first=self.worker.handle_timeout.call_count,
second=2,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNotNone(
obj=serial_executor.killer,
)
def test_on_retry(
self,
):
def retry_work_method(
task,
):
raise sergeant.worker.WorkerRetry()
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: retry_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerRetry,
)
self.worker.handle_retry.assert_called_once_with(
task=task,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNone(
obj=getattr(serial_executor, 'killer', None),
)
def test_on_max_retries(
self,
):
def max_retries_work_method(
task,
):
raise sergeant.worker.WorkerMaxRetries()
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: max_retries_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerMaxRetries,
)
self.worker.handle_max_retries.assert_called_once_with(
task=task,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_requeue.assert_not_called()
self.assertIsNone(
obj=getattr(serial_executor, 'killer', None),
)
def test_on_requeue(
self,
):
def requeue_work_method(
task,
):
raise sergeant.worker.WorkerRequeue()
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: requeue_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerRequeue,
)
self.worker.handle_requeue.assert_called_once_with(
task=task,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.assertIsNone(
obj=getattr(serial_executor, 'killer', None),
)
def test_stop(
self,
):
def stop_work_method(
task,
):
sergeant.worker.Worker.stop(None)
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: stop_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
with self.assertRaises(
expected_exception=sergeant.worker.WorkerStop,
):
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerStop,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
def test_respawn(
self,
):
def respawn_work_method(
task,
):
sergeant.worker.Worker.respawn(None)
self.worker.work = unittest.mock.MagicMock(
side_effect=lambda task: respawn_work_method(task),
)
serial_executor = sergeant.executor.serial.SerialExecutor(
worker_object=self.worker,
)
task = sergeant.objects.Task()
with self.assertRaises(
expected_exception=sergeant.worker.WorkerRespawn,
):
serial_executor.execute_tasks(
tasks=[task],
)
self.worker.work.assert_called_once_with(
task=task,
)
self.worker.pre_work.assert_called_once_with(
task=task,
)
self.worker.post_work.assert_called_once()
self.assertEqual(
first=self.worker.post_work.call_args[1]['task'],
second=task,
)
self.assertFalse(
expr=self.worker.post_work.call_args[1]['success'],
)
self.assertIsInstance(
obj=self.worker.post_work.call_args[1]['exception'],
cls=sergeant.worker.WorkerRespawn,
)
self.worker.handle_success.assert_not_called()
self.worker.handle_failure.assert_not_called()
self.worker.handle_timeout.assert_not_called()
self.worker.handle_retry.assert_not_called()
self.worker.handle_max_retries.assert_not_called()
self.worker.handle_requeue.assert_not_called()
| 30.960317
| 73
| 0.593591
| 2,022
| 19,505
| 5.453511
| 0.054896
| 0.145098
| 0.098667
| 0.089598
| 0.917657
| 0.880112
| 0.830598
| 0.824431
| 0.802938
| 0.776004
| 0
| 0.003056
| 0.312125
| 19,505
| 629
| 74
| 31.009539
| 0.818812
| 0
| 0
| 0.637782
| 0
| 0
| 0.018047
| 0
| 0
| 0
| 0
| 0
| 0.239168
| 1
| 0.038128
| false
| 0
| 0.010399
| 0
| 0.05026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9324cdbc46662d10025be3c4ece77d315da7b86b
| 179
|
py
|
Python
|
test/TaskReporter/__init__.py
|
paulondc/chilopoda
|
046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4
|
[
"MIT"
] | 2
|
2019-09-24T18:56:27.000Z
|
2021-02-07T04:58:49.000Z
|
test/TaskReporter/__init__.py
|
paulondc/kombi
|
046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4
|
[
"MIT"
] | 20
|
2019-02-16T04:21:13.000Z
|
2019-03-09T21:21:21.000Z
|
test/TaskReporter/__init__.py
|
paulondc/kombi
|
046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4
|
[
"MIT"
] | 3
|
2019-11-15T05:16:32.000Z
|
2021-09-28T21:28:29.000Z
|
from .ColumnsTaskReporterTest import ColumnsTaskReporterTest
from .DetailedTaskReporterTest import DetailedTaskReporterTest
from .JsonTaskReporterTest import JsonTaskReporterTest
| 44.75
| 62
| 0.916201
| 12
| 179
| 13.666667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067039
| 179
| 3
| 63
| 59.666667
| 0.982036
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa8b3ef5b625c42301e955bd217c837c539d7b37
| 233
|
py
|
Python
|
mcmf.py
|
mathpresso/Flo
|
c17b2aa8044c4323d9aa4aa8589f42f7a8817745
|
[
"MIT"
] | null | null | null |
mcmf.py
|
mathpresso/Flo
|
c17b2aa8044c4323d9aa4aa8589f42f7a8817745
|
[
"MIT"
] | null | null | null |
mcmf.py
|
mathpresso/Flo
|
c17b2aa8044c4323d9aa4aa8589f42f7a8817745
|
[
"MIT"
] | null | null | null |
# Driver script for the `mcmfModule` extension: runs `mcmf` on two
# Japanese course-title strings and prints the result.
# NOTE(review): mcmfModule is a project-local extension; the semantics of
# mcmf() (presumably a min-cost max-flow based comparison — confirm) and
# of the trailing integer argument are not visible here.
import mcmfModule
print(
    mcmfModule.mcmf(
        "夏期講習B日程t数復習(文型)1①三角関数8月27日(火)①t講習問題】sinθ−cosθ=½{1}{2}が成り立つとき,sinθ,co∞θの値を求めよ。",
        "夏期講習B日程t数復習(文型)2①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①①",
        4,  # NOTE(review): meaning of this parameter unknown — confirm against mcmfModule
    )
)
| 23.3
| 88
| 0.695279
| 27
| 233
| 6.148148
| 0.777778
| 0.156627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.314136
| 0.180258
| 233
| 9
| 89
| 25.888889
| 0.528796
| 0
| 0
| 0
| 0
| 0.125
| 0.613734
| 0.613734
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.125
| 0
| 0.125
| 0.125
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fad23f93d774ed6f32327e2d3d9e1ae866ca597f
| 4,356
|
py
|
Python
|
saleor/plugins/sendgrid/tests/test_plugin.py
|
greentornado/saleor
|
7f58917957a23c4dd90b47214a4500c91c735dee
|
[
"CC-BY-4.0"
] | 3
|
2021-06-22T12:38:18.000Z
|
2021-07-11T15:01:57.000Z
|
saleor/plugins/sendgrid/tests/test_plugin.py
|
greentornado/saleor
|
7f58917957a23c4dd90b47214a4500c91c735dee
|
[
"CC-BY-4.0"
] | 111
|
2021-06-30T08:51:06.000Z
|
2022-03-28T04:48:49.000Z
|
saleor/plugins/sendgrid/tests/test_plugin.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 6
|
2021-11-08T16:43:05.000Z
|
2022-03-22T17:31:16.000Z
|
from dataclasses import asdict
from unittest.mock import MagicMock, patch
import pytest
from django.core.exceptions import ValidationError
from ....core.notify_events import AdminNotifyEvent, UserNotifyEvent
from ...models import PluginConfiguration
from ..plugin import EVENT_MAP
def test_get_event_map():
    """Every user-facing notify event must have a handler in EVENT_MAP."""
    missing = [choice for choice in UserNotifyEvent.CHOICES if choice not in EVENT_MAP]
    assert not missing
@patch("saleor.plugins.sendgrid.plugin.EVENT_MAP")
def test_notify_when_plugin_disabled(mocked_event_map, sendgrid_email_plugin):
    """An inactive plugin must not dispatch any event task."""
    task_mock = MagicMock()
    fake_map = {
        UserNotifyEvent.ACCOUNT_PASSWORD_RESET: (
            task_mock,
            "account_password_reset_template_id",
        )
    }
    # Route dict-style access on the patched EVENT_MAP to our fake mapping.
    mocked_event_map.get.side_effect = fake_map.get
    mocked_event_map.__getitem__.side_effect = fake_map.__getitem__

    plugin = sendgrid_email_plugin(active=False)
    plugin.notify(UserNotifyEvent.ACCOUNT_PASSWORD_RESET, {}, None)

    assert task_mock.delay.called is False
@patch("saleor.plugins.sendgrid.plugin.EVENT_MAP")
def test_notify_not_valid_event_type(mocked_event_map, sendgrid_email_plugin):
    """Events the plugin has no mapping for must be ignored silently."""
    task_mock = MagicMock()
    fake_map = {
        UserNotifyEvent.ACCOUNT_PASSWORD_RESET: (
            task_mock,
            "account_password_reset_template_id",
        )
    }
    # Route dict-style access on the patched EVENT_MAP to our fake mapping.
    mocked_event_map.get.side_effect = fake_map.get
    mocked_event_map.__getitem__.side_effect = fake_map.__getitem__

    plugin = sendgrid_email_plugin(api_key="AB12", active=True)
    # An admin event not present in the fake map — nothing should run.
    plugin.notify(AdminNotifyEvent.CSV_EXPORT_FAILED, {}, None)

    assert task_mock.delay.called is False
@patch("saleor.plugins.sendgrid.plugin.EVENT_MAP")
def test_notify_missing_handler(mocked_event_map, sendgrid_email_plugin):
    """A known event type without a registered handler dispatches nothing."""
    payload = {"key_1": "value"}
    task_mock = MagicMock()
    # The fake map deliberately holds a DIFFERENT event, so the membership
    # test for ACCOUNT_PASSWORD_RESET comes back False.
    fake_map = {
        UserNotifyEvent.ACCOUNT_CHANGE_EMAIL_REQUEST: (
            task_mock,
            "account_password_reset_template_id",
        )
    }
    mocked_event_map.__contains__.side_effect = fake_map.__contains__

    plugin = sendgrid_email_plugin(api_key="AB12", active=True)
    plugin.notify(UserNotifyEvent.ACCOUNT_PASSWORD_RESET, payload, None)

    assert mocked_event_map.__contains__.called
    assert task_mock.delay.called is False
@patch("saleor.plugins.sendgrid.plugin.EVENT_MAP")
def test_notify_missing_template_id(mocked_event_map, sendgrid_email_plugin):
    """A handler whose configured template id is None must not be queued."""
    payload = {"key_1": "value"}
    task_mock = MagicMock()
    fake_map = {
        UserNotifyEvent.ACCOUNT_PASSWORD_RESET: (
            task_mock,
            "account_password_reset_template_id",
        )
    }
    # Membership always succeeds; item/get access resolves via the fake map.
    mocked_event_map.__contains__.return_value = True
    mocked_event_map.get.side_effect = fake_map.get
    mocked_event_map.__getitem__.side_effect = fake_map.__getitem__

    plugin = sendgrid_email_plugin(
        active=True, api_key="AB12", account_password_reset_template_id=None
    )
    plugin.notify(UserNotifyEvent.ACCOUNT_PASSWORD_RESET, payload, None)

    assert mocked_event_map.get.called
    assert task_mock.delay.called is False
@patch("saleor.plugins.sendgrid.plugin.EVENT_MAP")
def test_notify(mocked_event_map, sendgrid_email_plugin):
    """Happy path: the mapped task is queued once with payload and config."""
    payload = {"key_1": "value"}
    task_mock = MagicMock()
    fake_map = {
        UserNotifyEvent.ACCOUNT_PASSWORD_RESET: (
            task_mock,
            "account_password_reset_template_id",
        )
    }
    # Membership always succeeds; item/get access resolves via the fake map.
    mocked_event_map.__contains__.return_value = True
    mocked_event_map.get.side_effect = fake_map.get
    mocked_event_map.__getitem__.side_effect = fake_map.__getitem__

    plugin = sendgrid_email_plugin(
        active=True, api_key="AB12", account_password_reset_template_id="123"
    )
    plugin.notify(UserNotifyEvent.ACCOUNT_PASSWORD_RESET, payload, None)

    task_mock.delay.assert_called_once_with(
        payload, asdict(plugin.config)
    )
def test_save_plugin_configuration_missing_api_key(
    sendgrid_email_plugin,
):
    """Activating the plugin without an API key must raise ValidationError."""
    plugin = sendgrid_email_plugin(active=False)
    stored_configuration = PluginConfiguration.objects.get()
    cleaned_data = {"active": True, "configuration": []}

    with pytest.raises(ValidationError):
        plugin.save_plugin_configuration(stored_configuration, cleaned_data)
| 32.029412
| 79
| 0.75023
| 529
| 4,356
| 5.661626
| 0.151229
| 0.106845
| 0.08414
| 0.05409
| 0.752588
| 0.749249
| 0.719866
| 0.719866
| 0.719866
| 0.698498
| 0
| 0.00387
| 0.169421
| 4,356
| 135
| 80
| 32.266667
| 0.823936
| 0
| 0
| 0.55
| 0
| 0
| 0.100551
| 0.08494
| 0
| 0
| 0
| 0
| 0.08
| 1
| 0.07
| false
| 0.15
| 0.07
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
877f66191b16230a8077a1e28fd39ab635b3ae68
| 42
|
py
|
Python
|
__init__.py
|
penrin/audioproc
|
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
|
[
"MIT"
] | 2
|
2018-09-18T08:55:26.000Z
|
2020-01-24T04:31:25.000Z
|
__init__.py
|
penrin/audioproc
|
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
|
[
"MIT"
] | null | null | null |
__init__.py
|
penrin/audioproc
|
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from .audioproc import *
| 10.5
| 24
| 0.690476
| 6
| 42
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.190476
| 42
| 3
| 25
| 14
| 0.823529
| 0.309524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
87ab43416abb351c95af0a973ddf4a7430e9a1f2
| 28,137
|
py
|
Python
|
testsuite/test_directed_un_weighted_graph.py
|
trycatchhorn/PyAlgDat
|
85f8c7550630cf31b5e4472fd593956c9d96c078
|
[
"MIT"
] | null | null | null |
testsuite/test_directed_un_weighted_graph.py
|
trycatchhorn/PyAlgDat
|
85f8c7550630cf31b5e4472fd593956c9d96c078
|
[
"MIT"
] | null | null | null |
testsuite/test_directed_un_weighted_graph.py
|
trycatchhorn/PyAlgDat
|
85f8c7550630cf31b5e4472fd593956c9d96c078
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env py.test
"""
Test directed unweighted graph.
"""
import unittest
import copy
from py_alg_dat import dfs_edge_classification
from py_alg_dat import graph
from py_alg_dat import graph_edge
from py_alg_dat import graph_vertex
class TestDirectedUnWeightedGraph(unittest.TestCase):
"""
Test directed unweighted graph.
"""
def setUp(self):
# Create directed unweighted graph Cormen page 596.
self.graph1 = graph.DirectedUnWeightedGraph(5)
self.v1_g1 = graph_vertex.UnWeightedGraphVertex(self.graph1, 'S')
self.v2_g1 = graph_vertex.UnWeightedGraphVertex(self.graph1, 'T')
self.v3_g1 = graph_vertex.UnWeightedGraphVertex(self.graph1, 'X')
self.v4_g1 = graph_vertex.UnWeightedGraphVertex(self.graph1, 'Y')
self.v5_g1 = graph_vertex.UnWeightedGraphVertex(self.graph1, 'Z')
self.graph1.add_vertex(self.v1_g1)
self.graph1.add_vertex(self.v2_g1)
self.graph1.add_vertex(self.v3_g1)
self.graph1.add_vertex(self.v4_g1)
self.graph1.add_vertex(self.v5_g1)
self.e12 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v1_g1, self.v2_g1) # S -> T
self.e14 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v1_g1, self.v4_g1) # S -> Y
self.e23 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v2_g1, self.v3_g1) # T -> X
self.e24 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v2_g1, self.v4_g1) # T -> Y
self.e35 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v3_g1, self.v5_g1) # X -> Z
self.e42 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v4_g1, self.v2_g1) # Y -> T
self.e43 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v4_g1, self.v3_g1) # Y -> X
self.e45 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v4_g1, self.v5_g1) # Y -> Z
self.e53 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v5_g1, self.v3_g1) # Z -> X
self.e51 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph1, self.v5_g1, self.v1_g1) # Z -> S
self.graph1.add_edge(self.v1_g1, self.v2_g1)
self.graph1.add_edge(self.v1_g1, self.v4_g1)
self.graph1.add_edge(self.v2_g1, self.v3_g1)
self.graph1.add_edge(self.v2_g1, self.v4_g1)
self.graph1.add_edge(self.v3_g1, self.v5_g1)
self.graph1.add_edge(self.v4_g1, self.v2_g1)
self.graph1.add_edge(self.v4_g1, self.v3_g1)
self.graph1.add_edge(self.v4_g1, self.v5_g1)
self.graph1.add_edge(self.v5_g1, self.v3_g1)
self.graph1.add_edge(self.v5_g1, self.v1_g1)
# Create directed unweighted acyclic graph Bruno R. Preiss - Java - page 563.
self.graph2 = graph.DirectedUnWeightedGraph(9)
self.v0_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'a')
self.v1_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'b')
self.v2_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'c')
self.v3_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'd')
self.v4_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'e')
self.v5_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'f')
self.v6_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'g')
self.v7_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'h')
self.v8_g2 = graph_vertex.UnWeightedGraphVertex(self.graph2, 'i')
self.graph2.add_vertex(self.v0_g2)
self.graph2.add_vertex(self.v1_g2)
self.graph2.add_vertex(self.v2_g2)
self.graph2.add_vertex(self.v3_g2)
self.graph2.add_vertex(self.v4_g2)
self.graph2.add_vertex(self.v5_g2)
self.graph2.add_vertex(self.v6_g2)
self.graph2.add_vertex(self.v7_g2)
self.graph2.add_vertex(self.v8_g2)
self.e01_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v0_g2, self.v1_g2)
self.e02_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v0_g2, self.v2_g2)
self.e04_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v0_g2, self.v4_g2)
self.e13_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v1_g2, self.v3_g2)
self.e14_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v1_g2, self.v4_g2)
self.e27_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v2_g2, self.v7_g2)
self.e25_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v2_g2, self.v5_g2)
self.e36_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v3_g2, self.v6_g2)
self.e46_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v4_g2, self.v6_g2)
self.e48_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v4_g2, self.v8_g2)
self.e47_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v4_g2, self.v7_g2)
self.e57_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v5_g2, self.v7_g2)
self.e68_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v6_g2, self.v8_g2)
self.e78_g2 = graph_edge.DirectedUnWeightedGraphEdge(
self.graph2, self.v7_g2, self.v8_g2)
self.graph2.add_edge(self.v0_g2, self.v1_g2) # a -> b
self.graph2.add_edge(self.v0_g2, self.v2_g2) # a -> c
self.graph2.add_edge(self.v0_g2, self.v4_g2) # a -> e
self.graph2.add_edge(self.v1_g2, self.v3_g2) # b -> d
self.graph2.add_edge(self.v1_g2, self.v4_g2) # b -> e
self.graph2.add_edge(self.v2_g2, self.v7_g2) # c -> h
self.graph2.add_edge(self.v2_g2, self.v5_g2) # c -> f
self.graph2.add_edge(self.v3_g2, self.v6_g2) # d -> g
self.graph2.add_edge(self.v4_g2, self.v6_g2) # e -> g
self.graph2.add_edge(self.v4_g2, self.v8_g2) # e -> i
self.graph2.add_edge(self.v4_g2, self.v7_g2) # e -> h
self.graph2.add_edge(self.v5_g2, self.v7_g2) # f -> h
self.graph2.add_edge(self.v6_g2, self.v8_g2) # g -> i
self.graph2.add_edge(self.v7_g2, self.v8_g2) # h -> i
def test_directed_un_weighted_graph_copy(self):
"""
Test operator "copy".
"""
a_graph = graph.DirectedUnWeightedGraph(5)
vertex1 = graph_vertex.UnWeightedGraphVertex(a_graph, 'A')
vertex2 = graph_vertex.UnWeightedGraphVertex(a_graph, 'B')
vertex3 = graph_vertex.UnWeightedGraphVertex(a_graph, 'C')
vertex4 = graph_vertex.UnWeightedGraphVertex(a_graph, 'D')
vertex5 = graph_vertex.UnWeightedGraphVertex(a_graph, 'E')
a_graph.add_vertex(vertex1)
a_graph.add_vertex(vertex2)
a_graph.add_vertex(vertex3)
a_graph.add_vertex(vertex4)
a_graph.add_vertex(vertex5)
a_graph.add_edge(vertex1, vertex2)
a_graph.add_edge(vertex1, vertex3)
a_graph.add_edge(vertex1, vertex4)
a_graph.add_edge(vertex1, vertex5)
a_graph.add_edge(vertex2, vertex3)
a_graph.add_edge(vertex2, vertex4)
a_graph.add_edge(vertex2, vertex5)
a_graph.add_edge(vertex3, vertex4)
a_graph.add_edge(vertex3, vertex5)
a_graph.add_edge(vertex4, vertex5)
ref = copy.copy(a_graph)
self.assertEqual(a_graph, ref)
def test_directed_un_weighted_graph_len(self):
"""
Test operator "len".
"""
self.assertEqual(5, len(self.graph1))
def test_directed_un_weighted_graph_get_item(self):
"""
Test operator "get_item".
"""
self.assertEqual(self.graph1.get_vertex_at_index(3), self.graph1[3])
def test_directed_un_weighted_graph_get_number_of_vertices(self):
"""
Test method "get_number_of_vertices".
"""
self.assertEqual(5, self.graph1.get_number_of_vertices())
def test_directed_un_weighted_graph_get_number_of_edges(self):
"""
Test method "get_number_of_edges".
"""
self.assertEqual(10, self.graph1.get_number_of_edges())
def test_directed_un_weighted_graph_get_vertices(self):
"""
Test method "get_number_of_vertices".
"""
tmp1 = []
tmp1.append(self.v1_g1)
tmp1.append(self.v2_g1)
tmp1.append(self.v3_g1)
tmp1.append(self.v4_g1)
tmp1.append(self.v5_g1)
tmp2 = []
for i in self.graph1.get_vertices():
tmp2.append(i)
s_list1 = sorted(tmp1, key=lambda vertex: vertex.vertex_number)
s_list2 = sorted(tmp2, key=lambda vertex: vertex.vertex_number)
self.assertEqual(s_list1, s_list2)
def test_directed_un_weighted_graph_get_edges(self):
"""
Test method "get_edges".
"""
tmp1 = []
tmp1.append(self.e12)
tmp1.append(self.e14)
tmp1.append(self.e23)
tmp1.append(self.e24)
tmp1.append(self.e35)
tmp1.append(self.e42)
tmp1.append(self.e43)
tmp1.append(self.e45)
tmp1.append(self.e53)
tmp1.append(self.e51)
tmp2 = []
for i in self.graph1.get_edges():
tmp2.append(i)
s_list1 = sorted(tmp1, key=lambda edge: (
edge.head_vertex, edge.tail_vertex))
s_list2 = sorted(tmp2, key=lambda edge: (
edge.head_vertex, edge.tail_vertex))
self.assertEqual(s_list1, s_list2)
def test_directed_un_weighted_graph_get_edge(self):
"""
Test method "get_edge".
"""
self.assertEqual(self.e12, self.graph1.get_edge(
self.v1_g1, self.v2_g1))
def test_directed_un_weighted_graph_is_edge(self):
"""
Test method "is_edge".
"""
try:
self.assertTrue(self.graph1.is_edge(self.v1_g1, self.v2_g1))
except KeyError:
print "Exception caught: %s" % str(KeyError)
def test_directed_un_weighted_graph_is_directed(self):
"""
Test method "is_directed".
"""
self.assertTrue(self.graph1.is_directed())
def test_directed_un_weighted_graph_remove_vertex_v0(self):
"""
Test method "remove_vertex".
"""
# Create a graph from where a vertex should be removed.
a_graph = graph.DirectedUnWeightedGraph(5)
vertex0 = graph_vertex.UnWeightedGraphVertex(a_graph, 'A')
vertex1 = graph_vertex.UnWeightedGraphVertex(a_graph, 'B')
vertex2 = graph_vertex.UnWeightedGraphVertex(a_graph, 'C')
vertex3 = graph_vertex.UnWeightedGraphVertex(a_graph, 'D')
vertex4 = graph_vertex.UnWeightedGraphVertex(a_graph, 'E')
# Add vertices to the graph.
a_graph.add_vertex(vertex0)
a_graph.add_vertex(vertex1)
a_graph.add_vertex(vertex2)
a_graph.add_vertex(vertex3)
a_graph.add_vertex(vertex4)
# Add edges to the graph.
a_graph.add_edge(vertex0, vertex1)
a_graph.add_edge(vertex0, vertex2)
a_graph.add_edge(vertex0, vertex3)
a_graph.add_edge(vertex0, vertex4)
a_graph.add_edge(vertex1, vertex2)
a_graph.add_edge(vertex1, vertex3)
a_graph.add_edge(vertex1, vertex4)
a_graph.add_edge(vertex2, vertex3)
a_graph.add_edge(vertex2, vertex4)
a_graph.add_edge(vertex3, vertex4)
# Create a reference graph used to compare the result after a vertex has been removed.
g_ref = graph.DirectedUnWeightedGraph(4)
# Create reference vertices.
v1_ref = graph_vertex.UnWeightedGraphVertex(g_ref, 'B')
v2_ref = graph_vertex.UnWeightedGraphVertex(g_ref, 'C')
v3_ref = graph_vertex.UnWeightedGraphVertex(g_ref, 'D')
v4_ref = graph_vertex.UnWeightedGraphVertex(g_ref, 'E')
# Add vertices to the reference graph.
g_ref.add_vertex(v1_ref)
g_ref.add_vertex(v2_ref)
g_ref.add_vertex(v3_ref)
g_ref.add_vertex(v4_ref)
# Add edges to the reference graph.
g_ref.add_edge(v1_ref, v2_ref)
g_ref.add_edge(v1_ref, v3_ref)
g_ref.add_edge(v1_ref, v4_ref)
g_ref.add_edge(v2_ref, v3_ref)
g_ref.add_edge(v2_ref, v4_ref)
g_ref.add_edge(v3_ref, v4_ref)
# Remove vertex form graph.
a_graph.remove_vertex(vertex0)
self.assertEqual(g_ref, a_graph)
def test_directed_un_weighted_graph_remove_vertex_v1(self):
    """
    Test method "remove_vertex".
    """
    # Build a five-vertex graph from which vertex 'B' will be removed.
    full_graph = graph.DirectedUnWeightedGraph(5)
    vertices = [graph_vertex.UnWeightedGraphVertex(full_graph, label)
                for label in 'ABCDE']
    for vertex in vertices:
        full_graph.add_vertex(vertex)
    # Connect every vertex to all later vertices with a directed edge.
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            full_graph.add_edge(vertices[i], vertices[j])
    # Build the reference graph: the same topology without vertex 'B'.
    expected = graph.DirectedUnWeightedGraph(4)
    kept = [graph_vertex.UnWeightedGraphVertex(expected, label)
            for label in 'ACDE']
    for vertex in kept:
        expected.add_vertex(vertex)
    for i in range(len(kept)):
        for j in range(i + 1, len(kept)):
            expected.add_edge(kept[i], kept[j])
    # Remove vertex 'B' from the graph and compare with the reference.
    full_graph.remove_vertex(vertices[1])
    self.assertEqual(expected, full_graph)
def test_directed_un_weighted_graph_remove_vertex_v2(self):
    """
    Test method "remove_vertex".
    """
    # Build a five-vertex graph from which vertex 'C' will be removed.
    full_graph = graph.DirectedUnWeightedGraph(5)
    vertices = [graph_vertex.UnWeightedGraphVertex(full_graph, label)
                for label in 'ABCDE']
    for vertex in vertices:
        full_graph.add_vertex(vertex)
    # Connect every vertex to all later vertices with a directed edge.
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            full_graph.add_edge(vertices[i], vertices[j])
    # Build the reference graph: the same topology without vertex 'C'.
    expected = graph.DirectedUnWeightedGraph(4)
    kept = [graph_vertex.UnWeightedGraphVertex(expected, label)
            for label in 'ABDE']
    for vertex in kept:
        expected.add_vertex(vertex)
    for i in range(len(kept)):
        for j in range(i + 1, len(kept)):
            expected.add_edge(kept[i], kept[j])
    # Remove vertex 'C' from the graph and compare with the reference.
    full_graph.remove_vertex(vertices[2])
    self.assertEqual(expected, full_graph)
def test_directed_un_weighted_graph_remove_vertex_v3(self):
    """
    Test method "remove_vertex".
    """
    # Build a five-vertex graph from which vertex 'D' will be removed.
    full_graph = graph.DirectedUnWeightedGraph(5)
    vertices = [graph_vertex.UnWeightedGraphVertex(full_graph, label)
                for label in 'ABCDE']
    for vertex in vertices:
        full_graph.add_vertex(vertex)
    # Connect every vertex to all later vertices with a directed edge.
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            full_graph.add_edge(vertices[i], vertices[j])
    # Build the reference graph: the same topology without vertex 'D'.
    expected = graph.DirectedUnWeightedGraph(4)
    kept = [graph_vertex.UnWeightedGraphVertex(expected, label)
            for label in 'ABCE']
    for vertex in kept:
        expected.add_vertex(vertex)
    for i in range(len(kept)):
        for j in range(i + 1, len(kept)):
            expected.add_edge(kept[i], kept[j])
    # Remove vertex 'D' from the graph and compare with the reference.
    full_graph.remove_vertex(vertices[3])
    self.assertEqual(expected, full_graph)
def test_directed_un_weighted_graph_remove_vertex_v4(self):
    """
    Test method "remove_vertex".
    """
    # Build a five-vertex graph from which vertex 'E' will be removed.
    full_graph = graph.DirectedUnWeightedGraph(5)
    vertices = [graph_vertex.UnWeightedGraphVertex(full_graph, label)
                for label in 'ABCDE']
    for vertex in vertices:
        full_graph.add_vertex(vertex)
    # Connect every vertex to all later vertices with a directed edge.
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            full_graph.add_edge(vertices[i], vertices[j])
    # Build the reference graph: the same topology without vertex 'E'.
    expected = graph.DirectedUnWeightedGraph(4)
    kept = [graph_vertex.UnWeightedGraphVertex(expected, label)
            for label in 'ABCD']
    for vertex in kept:
        expected.add_vertex(vertex)
    for i in range(len(kept)):
        for j in range(i + 1, len(kept)):
            expected.add_edge(kept[i], kept[j])
    # Remove vertex 'E' from the graph and compare with the reference.
    full_graph.remove_vertex(vertices[4])
    self.assertEqual(expected, full_graph)
def test_directed_un_weighted_graph_is_strongly_connected(self):
    """
    Test method "is_strongly_connected".
    """
    # Graph 1 is expected to be strongly connected.
    result = self.graph1.is_strongly_connected()
    self.assertTrue(result)
def test_directed_un_weighted_graph_is_strongly_connected_not(self):
    """
    Test method "is_strongly_connected" - inverted.
    """
    # Graph 'g2' is not strongly connected: no vertex can be
    # reached from vertex 'a'.
    result = self.graph2.is_strongly_connected()
    self.assertFalse(result)
def test_directed_un_weighted_graph_is_cyclic(self):
    """
    Test method "is_cyclic".
    """
    # Build a four-vertex directed ring: A -> B -> C -> D -> A.
    cyclic_graph = graph.DirectedUnWeightedGraph(4)
    vertices = [graph_vertex.UnWeightedGraphVertex(cyclic_graph, label)
                for label in 'ABCD']
    for vertex in vertices:
        cyclic_graph.add_vertex(vertex)
    # Close the ring: each vertex points at its successor, the last
    # one wraps around to the first.
    for index, vertex in enumerate(vertices):
        cyclic_graph.add_edge(vertex, vertices[(index + 1) % len(vertices)])
    self.assertTrue(cyclic_graph.is_cyclic())
def test_directed_un_weighted_graph_is_cyclic_not(self):
    """
    Test method "is_cyclic" - inverted.
    """
    # Build an acyclic diamond with edges B->A, D->A, C->B, C->D:
    # everything flows from C towards A, so there is no cycle.
    acyclic_graph = graph.DirectedUnWeightedGraph(4)
    by_label = {}
    for label in 'ABCD':
        vertex = graph_vertex.UnWeightedGraphVertex(acyclic_graph, label)
        by_label[label] = vertex
        acyclic_graph.add_vertex(vertex)
    for tail, head in (('B', 'A'), ('D', 'A'), ('C', 'B'), ('C', 'D')):
        acyclic_graph.add_edge(by_label[tail], by_label[head])
    self.assertFalse(acyclic_graph.is_cyclic())
def test_directed_un_weighted_graph_get_vertex_at_index(self):
    """
    Test method "get_vertex_at_index".
    """
    # Index 3 must yield the fourth vertex of graph 1.
    vertex = self.graph1.get_vertex_at_index(3)
    self.assertEqual(self.v4_g1, vertex)
def test_directed_un_weighted_graph_get_emanating_edges(self):
    """
    Test method "get_emanating_edges".

    Vertex 1 of graph 1 has the outgoing edges e12 and e14, so
    "get_emanating_edges" must return exactly those two edges.
    """
    # Build the expected list directly instead of appending, and do
    # not pre-initialize 'res' (the old 'res = []' was dead code,
    # immediately overwritten by the call below).
    ref = [self.e12, self.e14]
    res = self.graph1.get_emanating_edges(self.v1_g1.get_vertex_number())
    self.assertEqual(ref, res)
def test_directed_un_weighted_graph_get_incident_edges_v1(self):
    """
    Test method "get_incident_edges".

    Vertex 1 of graph 1 has the single incoming edge e51, so
    "get_incident_edges" must return exactly that edge.
    """
    # Build the expected list directly instead of appending, and do
    # not pre-initialize 'res' (the old 'res = []' was dead code,
    # immediately overwritten by the call below).
    ref = [self.e51]
    res = self.graph1.get_incident_edges(self.v1_g1.get_vertex_number())
    self.assertEqual(ref, res)
def test_directed_un_weighted_graph_classify_edges_cyclic(self):
    """
    Test edge classification - directed unweighted cyclic graph.
    """
    # Build a directed graph with a three-vertex cycle A->B->C->A
    # plus a fourth vertex D that has no edges.
    cyclic_graph = graph.DirectedUnWeightedGraph(4)
    vertices = [graph_vertex.UnWeightedGraphVertex(cyclic_graph, label)
                for label in 'ABCD']
    for vertex in vertices:
        cyclic_graph.add_vertex(vertex)
    cyclic_graph.add_edge(vertices[0], vertices[1])
    cyclic_graph.add_edge(vertices[1], vertices[2])
    cyclic_graph.add_edge(vertices[2], vertices[0])
    res = cyclic_graph.classify_edges().get_edges()
    # Expected classification: A->B and B->C are tree edges, while
    # the cycle-closing edge C->A is a back edge.
    ref = dfs_edge_classification.DFSEdgeClassification(
        cyclic_graph).get_edges()
    edge_ab = graph_edge.DirectedUnWeightedGraphEdge(
        cyclic_graph, vertices[0], vertices[1])
    edge_bc = graph_edge.DirectedUnWeightedGraphEdge(
        cyclic_graph, vertices[1], vertices[2])
    edge_ca = graph_edge.DirectedUnWeightedGraphEdge(
        cyclic_graph, vertices[2], vertices[0])
    ref[edge_ab] = graph_edge.EdgeClassification.TREE_EDGE
    ref[edge_bc] = graph_edge.EdgeClassification.TREE_EDGE
    ref[edge_ca] = graph_edge.EdgeClassification.BACK_EDGE
    self.assertEqual(res, ref)
def test_directed_un_weighted_graph_classify_edges_acyclic(self):
    """
    Test edge classification - directed unweighted acyclic graph.
    """
    # Build a directed acyclic graph: A->B, then B branches to C and D.
    acyclic_graph = graph.DirectedUnWeightedGraph(4)
    vertices = [graph_vertex.UnWeightedGraphVertex(acyclic_graph, label)
                for label in 'ABCD']
    for vertex in vertices:
        acyclic_graph.add_vertex(vertex)
    acyclic_graph.add_edge(vertices[0], vertices[1])
    acyclic_graph.add_edge(vertices[1], vertices[2])
    acyclic_graph.add_edge(vertices[1], vertices[3])
    res = acyclic_graph.classify_edges().get_edges()
    # Expected classification: with no cycles every edge is a tree edge.
    ref = dfs_edge_classification.DFSEdgeClassification(
        acyclic_graph).get_edges()
    edge_ab = graph_edge.DirectedUnWeightedGraphEdge(
        acyclic_graph, vertices[0], vertices[1])
    edge_bc = graph_edge.DirectedUnWeightedGraphEdge(
        acyclic_graph, vertices[1], vertices[2])
    edge_bd = graph_edge.DirectedUnWeightedGraphEdge(
        acyclic_graph, vertices[1], vertices[3])
    ref[edge_ab] = graph_edge.EdgeClassification.TREE_EDGE
    ref[edge_bc] = graph_edge.EdgeClassification.TREE_EDGE
    ref[edge_bd] = graph_edge.EdgeClassification.TREE_EDGE
    self.assertEqual(res, ref)
| 39.188022
| 94
| 0.647581
| 3,713
| 28,137
| 4.595475
| 0.046324
| 0.072789
| 0.063295
| 0.056379
| 0.893161
| 0.852722
| 0.744887
| 0.712243
| 0.679775
| 0.609858
| 0
| 0.041574
| 0.252834
| 28,137
| 717
| 95
| 39.242678
| 0.770061
| 0.091197
| 0
| 0.583158
| 0
| 0
| 0.004159
| 0
| 0
| 0
| 0
| 0
| 0.050526
| 0
| null | null | 0
| 0.012632
| null | null | 0.002105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
87be4387a82b4c163c9058a83e88b7b1f78e3979
| 82
|
py
|
Python
|
configs/NoC/noc_config.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | 3
|
2021-04-24T16:08:00.000Z
|
2022-03-22T22:07:40.000Z
|
configs/NoC/noc_config.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | null | null | null |
configs/NoC/noc_config.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | 1
|
2021-03-25T16:55:08.000Z
|
2021-03-25T16:55:08.000Z
|
noc_config = [
["c", "c"],
["c", "c"],
["n", "v"],
#["c", "n"],
]
| 11.714286
| 16
| 0.219512
| 10
| 82
| 1.7
| 0.5
| 0.352941
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.341463
| 82
| 6
| 17
| 13.666667
| 0.314815
| 0.134146
| 0
| 0.4
| 0
| 0
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.