hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0895736ffacb80f0aa3e0f0691583c43198edc4a
| 343
|
py
|
Python
|
session_1/Test.py
|
idlaviV/intro-to-python
|
c3bd8e4f65ce6f1159f769a3da0fcc841b9d6d22
|
[
"MIT"
] | null | null | null |
session_1/Test.py
|
idlaviV/intro-to-python
|
c3bd8e4f65ce6f1159f769a3da0fcc841b9d6d22
|
[
"MIT"
] | null | null | null |
session_1/Test.py
|
idlaviV/intro-to-python
|
c3bd8e4f65ce6f1159f769a3da0fcc841b9d6d22
|
[
"MIT"
] | null | null | null |
for index, entry in enumerate({"Tam", "Tim", "Tom"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
for index, entry in enumerate({"Tam", "Tom", "Tim"}):
print(f"{index}: {entry}")
| 28.583333
| 53
| 0.571429
| 48
| 343
| 4.083333
| 0.208333
| 0.408163
| 0.265306
| 0.306122
| 0.969388
| 0.969388
| 0.831633
| 0.831633
| 0.831633
| 0.831633
| 0
| 0
| 0.16035
| 343
| 11
| 54
| 31.181818
| 0.680556
| 0
| 0
| 0.875
| 0
| 0
| 0.291545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 12
|
08af45abc5d3404eac4f79a1cd4f2df3ff58a65c
| 56,248
|
py
|
Python
|
RecoJets/JetAnalyzers/test/runCMSDAS11DijetTestAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoJets/JetAnalyzers/test/runCMSDAS11DijetTestAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoJets/JetAnalyzers/test/runCMSDAS11DijetTestAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# PYTHON configuration file for class: CMSDAS11DijetAnalyzer.cc
# Description: Example of simple EDAnalyzer for dijet mass & dijet spectrum ratio analysis
# Authors: J.P. Chou, Jason St. John
# Date: 01 - January - 2011
import FWCore.ParameterSet.Config as cms
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
############# Set the input for the analyzer #########
# Which input files to use
# 1: data, 10k events
# 2: MC resonance 1.2TeV
# 3: MC QCD only, no resonance
whichfiles = 1;
############# Format MessageLogger #################
process.MessageLogger.cerr.FwkReport.reportEvery = 10
############# This is how CMS handles output ROOT files #################
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string("histos.root")
)
if (whichfiles==1):
thefileNames = cms.untracked.vstring('file:/uscms_data/d2/kalanand/dijet-Run2010A-JetMET-Nov4ReReco-9667events.root')
elif (whichfiles==2):
thefileNames = cms.untracked.vstring('/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0026/6C0BC238-4247-DF11-81D7-E41F1318160C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/FE3B276E-1C47-DF11-83B7-00215E22053A.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/FCD94068-2647-DF11-80FF-E41F131817F8.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/F843DD92-2647-DF11-9B8F-E41F13181668.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/F62E9060-1C47-DF11-B431-00215E221BC0.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/DA63FD75-1C47-DF11-A347-00215E221692.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/D802BE12-1D47-DF11-9FA1-00215E93D738.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/D65DA979-1C47-DF11-941C-00215E21DD56.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/D2647956-1C47-DF11-9C8E-00215E2216EC.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/D0A03150-1C47-DF11-ACE6-E41F1318099C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/CC2D830B-1D47-DF11-966E-E41F13181498.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/CACB401B-1847-DF11-B9C5-E41F13181D00.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/CA85A868-2647-DF11-86BA-00215E221818.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/CA4A0A60-1C47-DF11-836E-00215E21D86A.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/CA070629-2747-DF11-819C-00215E222790.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/C0757277-1C47-DF11-90B7-00215E2212D2.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/BE19E200-1D47-DF11-A019-00215E22200A.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/AE023D13-1D47-DF11-892F-E41F13181688.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/AC98A079-1E47-DF11-8842-E41F13180A64.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/AA56B163-1C47-DF11-BF57-00215E21D570.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/9E440461-1C47-DF11-AC32-00215E21DBA0.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/9A160E70-1C47-DF11-AE68-00215E2208EE.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/96DF3203-1D47-DF11-849B-00215E21D948.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/9606A290-2647-DF11-A095-E41F13181AB4.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/9451B34F-1C47-DF11-B75C-E41F13181A5C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/80CCEE69-1E47-DF11-9216-00215E2211F4.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/76B9DA6A-1C47-DF11-B13C-00215E222340.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/74489C67-2647-DF11-9B52-E41F13181588.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/6E8FD06A-1C47-DF11-A1DF-00215E21D9F6.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/6E872979-1C47-DF11-87DE-00215E21D540.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/646F4C0E-1D47-DF11-9C8E-00215E21D7C8.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/62D1695A-1C47-DF11-9C53-00215E21D786.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/5CAAC1A3-1B47-DF11-9B5B-00215E21DBBE.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/5A173FCA-2547-DF11-8F56-E41F131816A0.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/5670C812-1D47-DF11-8020-E41F13181890.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/52597D68-1E47-DF11-B7D1-E41F131816B4.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/4ECB6F7B-1E47-DF11-833D-E41F131816A0.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/4EB42E67-1C47-DF11-A1FA-00215E2205AC.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/4EB42E67-1C47-DF11-A1FA-00215E2205AC.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/4AEB1C64-1C47-DF11-BA6A-00215E221B48.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/48CCB914-1D47-DF11-B3D2-E41F13181044.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/3E489C57-1C47-DF11-A6EC-00215E93ED9C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/3C04D1FE-1C47-DF11-8E76-E41F1318170C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/3AE57490-2647-DF11-AA52-E41F13181CF8.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/38AC77A1-1B47-DF11-B015-E41F13181CA4.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/2AD2CC1E-1847-DF11-BCE3-00215E21D57C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/2A4DFD09-1D47-DF11-9D16-00215E2223D6.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/2A407379-1C47-DF11-A7F2-00215E21DAF2.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/289FB808-1D47-DF11-A091-00215E222808.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/28160333-1847-DF11-BAD5-00215E21D702.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/220A4374-1C47-DF11-9FA1-00215E21DF18.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/20D4E62F-1847-DF11-8D41-00215E2222A4.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/1C8DD3A3-1B47-DF11-BAF8-E41F1318168C.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/169CCA06-1D47-DF11-B2C0-00215E221EEA.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/102D9F52-1C47-DF11-B437-00215E22181E.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/0EB6DB6E-1E47-DF11-8D0B-00215E93DCFC.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/08056A58-1C47-DF11-A42D-00215E21DAAA.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0025/06E9D555-1C47-DF11-9E40-00215E2219E6.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0024/E2CAFC76-0C47-DF11-9784-00215E93E7DC.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0024/94318864-1147-DF11-B2FD-E41F131815B8.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0024/66451B78-0C47-DF11-8CB0-00215E93EE44.root',
'/store/mc/Spring10/Qstar_DiJet1200/GEN-SIM-RECO/START3X_V26_S09-v1/0024/5A3665E0-1047-DF11-9537-00215E22175E.root'
)
elif (whichfiles==3):
thefileNames = cms.untracked.vstring()
############# Define the source file ###############
process.source = cms.Source("PoolSource",
fileNames = thefileNames
)
if (whichfiles ==3):
thefileNames.extend( [
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/F4FDEF3F-9CF1-DF11-9AC7-00304867FD2B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/DE3FF3C8-A2F1-DF11-AF5A-0030483355A8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/C84D7DC5-CEF1-DF11-BF16-0002C90A3696.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/C0E8D0BD-A4F1-DF11-805E-00304867FEBB.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/BE7BD0DF-D8F1-DF11-B336-00304867FD4F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/9E71C0AA-A2F1-DF11-976B-00304867FF17.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/861C09C8-A4F1-DF11-A452-00304867FD67.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/6A6F80C3-9BF1-DF11-A1D2-0023546BA20F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/626D08D6-CEF1-DF11-A5DB-00E081237A11.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/48ED637F-D2F1-DF11-88AB-00238BCE45B0.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/2E8F9757-EEF1-DF11-A6AB-001A9243D5C7.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0024/1241B8C7-C2F1-DF11-BF17-0002C90A344C.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FEEB3B9A-7BF1-DF11-858B-001A9243D5E7.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FEAA5C75-B1F1-DF11-92F7-0002C90A370A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FC7E715F-9FF1-DF11-A5F9-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FAFB83A2-56F1-DF11-A05B-0015174F088C.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FAF0F14D-75F1-DF11-A206-001A9243D537.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FAC426AD-80F1-DF11-A913-003048322B3E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FAAB6CBC-49F1-DF11-AEDA-0019B9CADC3D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FA566CF9-B6F1-DF11-AE12-0002C90A34A8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/FA0D26DA-88F1-DF11-8FA8-00238BCE45EC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F8AB2811-5EF1-DF11-A71B-A4BADB1E65B1.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F62D5C87-B1F1-DF11-B1D6-0002C90B742A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F49A8284-5FF1-DF11-B548-0025901D0C54.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F418C6FF-81F1-DF11-85BB-00238BBD7674.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F2AF5FA2-56F1-DF11-BA54-0015174ED2F2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F2A07C3E-57F1-DF11-9F64-A4BADB1C5D42.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F26EB9A8-9BF1-DF11-ADE3-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F248E247-9AF1-DF11-8FAC-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/F0EB31E7-5FF1-DF11-8208-A4BADB1E6796.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/EEAD2A29-A6F1-DF11-8D47-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/EC8E1B98-A8F1-DF11-BEC7-0002C90A34A2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E820F4EF-4EF1-DF11-BC2F-00E08120B64F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E65B3AD3-9BF1-DF11-9A32-0002C90A3414.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E6206705-82F1-DF11-A572-00238BCE463C.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E4CD2AB7-61F1-DF11-804E-842B2B019EA1.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E4A87E79-57F1-DF11-9BFA-842B2B1807B2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E27C3FD4-9BF1-DF11-BD20-0002C90A3414.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E2772FF1-B6F1-DF11-B762-0002C90A3642.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E24E4111-5EF1-DF11-97EA-842B2B42B536.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E0643905-9FF1-DF11-9EEE-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/E0434024-59F1-DF11-899D-842B2B17E35B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/DE61E846-A4F1-DF11-9490-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/DC5F280C-BCF1-DF11-B624-0002C90A34A2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/DA9677AE-77F1-DF11-AC2C-0025901D08B8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/D8203073-B3F1-DF11-AC88-0002C90A34A2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/D6547615-9EF1-DF11-8B7A-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/D04A2B71-B1F1-DF11-ADCC-0002C90A370A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/D02E718B-68F1-DF11-BCA3-842B2B1810D5.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/CE85F8CC-B8F1-DF11-86B0-0002C90A3526.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C89D3872-7BF1-DF11-8556-00304834BB58.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C841C4C9-53F1-DF11-8765-001731EB1E48.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C63AAE0D-89F1-DF11-B8AA-0023546BA347.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C4CC68C9-A0F1-DF11-9FB2-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C2F099F6-A7F1-DF11-95AB-0002C90A3562.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C2B5986C-AAF1-DF11-9839-0002C90B7FCA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C28CA099-49F1-DF11-8F12-001EC94B4F72.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C0C301F9-99F1-DF11-90FD-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C0609469-9FF1-DF11-BC6E-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/C00F51B8-85F1-DF11-AE3A-001A92971CDD.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BEFA2F14-A8F1-DF11-90FC-0002C90A36FC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BE804392-B3F1-DF11-973F-0002C90A3642.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BE4F996C-AAF1-DF11-9044-0002C90B7FCA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BADD38C8-98F1-DF11-BF02-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BADB5A25-C1F1-DF11-9759-0002C90B3976.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BA773002-7DF1-DF11-A2C8-E0CB4E2E4E6A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BA762614-80F1-DF11-B41A-001A92971C6D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/BA0EB2F0-5CF1-DF11-8E11-0025901D08EC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B8E7BFD5-7EF1-DF11-9696-001CC05CC464.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B8C3A2DE-A2F1-DF11-AF2C-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B8631A72-A2F1-DF11-8D14-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B4B3B985-62F1-DF11-9C98-001731EB1F36.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B0F888A0-55F1-DF11-A646-0026B93B21AE.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B0DFB321-8BF1-DF11-A5A0-00304865C331.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/B0622FD6-96F1-DF11-819A-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/AEB0F0D7-A0F1-DF11-A714-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/ACA8612C-A4F1-DF11-AC11-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/AC532319-46F1-DF11-9905-00E081416320.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/AAC817A8-B8F1-DF11-A9DC-0002C90A3526.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/AABE3FF2-5FF1-DF11-9AC7-A4BADB1C5D42.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A8FD6976-6AF1-DF11-B218-A4BADB22B643.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A8556918-9EF1-DF11-BD12-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A8033BD7-9BF1-DF11-B629-0002C90A3414.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A67557E7-A9F1-DF11-8619-0002C90B7412.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A63C3F9F-9DF1-DF11-845C-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A433FC40-9AF1-DF11-B99A-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/A42F1DC0-53F1-DF11-9C58-842B2B42BC3A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/9C70BD05-A8F1-DF11-9766-0002C90A36FC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/9AA1694C-81F1-DF11-A62A-00238BCE4616.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/9A5051AC-44F1-DF11-ACFB-842B2B0A39C8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/98E74C3E-77F1-DF11-80B2-003048F59728.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/98D16CAB-50F1-DF11-B662-0019B9CAFE71.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/98B0F425-A6F1-DF11-940D-0002C90A36B8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/96C25F29-82F1-DF11-A832-003048F5970A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/96BE27D3-5FF1-DF11-816D-00E081551CFC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/96B30CFB-99F1-DF11-96B8-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/961DDD24-A4F1-DF11-B169-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/9601D204-82F1-DF11-844D-003048344BF8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/94761063-9FF1-DF11-9887-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/9452E9F2-5FF1-DF11-B5E9-A4BADB22A4AE.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/94399AB8-98F1-DF11-8D13-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8EF10C46-B9F1-DF11-9101-0002C90A3642.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8EEFB29F-9DF1-DF11-AC11-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8C34DCFE-81F1-DF11-A836-003048F5B69A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8AFC6334-A4F1-DF11-8231-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8ADCE3B2-89F1-DF11-8C34-00304832293E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/8A5A3212-5EF1-DF11-96FC-0026B93A0356.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/86F07028-A4F1-DF11-B25F-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/849007DE-A5F1-DF11-AB39-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/842A18A9-B7F1-DF11-BFC0-0002C90A3428.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/82C0303A-57F1-DF11-80D2-A4BADB1E6F7A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/820CCAC9-91F1-DF11-B8D1-00304867FEAF.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/80914757-5EF1-DF11-82A5-0026B937D207.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7CE18D67-98F1-DF11-9BAF-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7C569530-B7F1-DF11-A763-0002C90A3642.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7A9F190E-5FF1-DF11-BA2E-0025901D08E8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7A34CE14-82F1-DF11-87BB-001A9243D62B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/78E55DB3-B8F1-DF11-A028-0002C90A344C.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7889EEE3-4EF1-DF11-8E78-842B2B17F73D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/7835D19C-9DF1-DF11-A533-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/76BD67F8-7FF1-DF11-B1DD-001A9243D57D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/74CE1485-8CF1-DF11-A5B8-00304867FE1B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/74CCF51B-9EF1-DF11-A595-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/745D968E-68F1-DF11-8D25-0013D4892034.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/721DA1F6-5FF1-DF11-B379-842B2B1807B2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/70DC7FA9-61F1-DF11-B4BE-842B2B42D35D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/70CB626E-B1F1-DF11-9DA7-0002C90A370A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/6EB0FF1C-A6F1-DF11-926A-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/6E0F34FD-55F1-DF11-ADC2-003048F59728.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/6AA654D1-A0F1-DF11-B16C-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/688F037F-62F1-DF11-B225-00093D122E06.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/643C5C86-B3F1-DF11-93E2-0002C90A34A8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/62157FEC-99F1-DF11-A13C-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/606196D5-BDF1-DF11-AD1B-0002C90A3696.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5E894CD5-96F1-DF11-B89A-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5C724B61-52F1-DF11-AF07-003048339B04.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5C1AFC79-B3F1-DF11-A8C5-0002C90B7F2A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5A2A34B8-57F1-DF11-B579-0025901D08D6.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5A0FDE19-C1F1-DF11-BA5B-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/56A945CB-8AF1-DF11-AE2C-00304867FF23.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/560FFA77-50F1-DF11-8F59-00304865C338.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/54D5A978-B3F1-DF11-9D42-0002C90B3974.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/52F2F50D-A8F1-DF11-BFAE-0002C90A36FC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/52DEED70-52F1-DF11-802B-0015174ED5B2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5250A0AD-9BF1-DF11-87CE-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/523D7BA9-55F1-DF11-BAFA-842B2B17F557.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/521868E8-4EF1-DF11-85A9-842B2B17E8C6.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/5062DB3D-52F1-DF11-A1AF-0019B9CB020F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/504A3591-49F1-DF11-B7A2-0026B937D37D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/501E0B9C-76F1-DF11-928C-00304867FDAF.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/4EDF877A-A2F1-DF11-BDBC-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/4E35E675-57F1-DF11-8497-842B2B1815B3.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/4CCB4244-61F1-DF11-987B-00238B8A3CEE.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/4CB0604A-71F1-DF11-8B09-A4BADB1E6055.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/4C33CF4B-6AF1-DF11-A011-001731EF5DA9.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/40CA0154-9AF1-DF11-8C15-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/40A3382B-A4F1-DF11-8E31-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/409FC06F-7CF1-DF11-858E-001A92971C6D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/401E8B7D-57F1-DF11-A57F-0026B93AA8FF.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/3E90D640-A1F1-DF11-8520-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/3E6B99C6-A0F1-DF11-9F10-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/3E279653-BAF1-DF11-B92E-0002C90A36B8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/3C65E34B-98F1-DF11-A784-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/3A7BA10B-84F1-DF11-B4A4-0026180A8746.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/38B9C6B0-55F1-DF11-80AB-0026B939DCF3.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/38455B25-61F1-DF11-9970-003048770C64.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/365B8EE2-A2F1-DF11-82DF-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/36437AA5-61F1-DF11-B9F8-0026B93785F5.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/360DD1F4-5FF1-DF11-ADDE-842B2B18178A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/32685A03-6CF1-DF11-AE08-001731EB1E10.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2CE14BAD-49F1-DF11-800B-0019B9CB01E8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2C482D7A-B1F1-DF11-83D7-0002C90B742A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2C274A41-8FF1-DF11-B84A-485B3919F14E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2A9DF6AE-56F1-DF11-8F19-0013D4892044.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2A692A5C-9FF1-DF11-B03D-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2A65F390-5FF1-DF11-8A05-00238BCE4618.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/283BD49F-55F1-DF11-AF2C-842B2B17E35B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2834A370-44F1-DF11-80AC-A4BADB1C4493.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/24D6F864-57F1-DF11-A3D1-0015174ED4F6.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/2044A994-B7F1-DF11-9DFA-0002C90A3428.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1EAFE1DF-A2F1-DF11-93E6-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1AF8996C-AAF1-DF11-A1FC-0002C90B7FCA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1A9D4567-AAF1-DF11-B8F5-0002C90B7FCA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1A292E53-5EF1-DF11-AFB6-842B2B17E35B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1888E33C-80F1-DF11-B6DA-0025901D08B8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/186738F0-A7F1-DF11-823E-0002C90A355A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/16C28946-98F1-DF11-A8A6-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/140D23D6-4EF1-DF11-A384-0026B937D37D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/12B87E35-9AF1-DF11-8772-00304867FEBB.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/127B5B49-82F1-DF11-B5B6-003048F5B2AC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/1075A5A9-44F1-DF11-9C2F-0026B939EA8A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/105EB874-82F1-DF11-8443-00304865C465.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0E9B5859-5EF1-DF11-8C50-842B2B17E3BA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0E572BC2-B1F1-DF11-8D67-0002C90B742A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0CD7CB4A-6AF1-DF11-8507-001731EF5E73.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0C91FEF6-99F1-DF11-9A15-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0C1C347A-92F1-DF11-9476-003048322A48.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0A1092A2-56F1-DF11-A915-0015174F0A98.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0663D14B-6AF1-DF11-88E8-001731EB1DE6.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0409D2A4-85F1-DF11-813E-00304867FD77.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/024780B8-9BF1-DF11-BC92-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/0222799A-68F1-DF11-8F9B-0015F2399187.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/00E2E93E-A4F1-DF11-9D71-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/00C17C88-57F1-DF11-BD7D-003048344C1A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0023/00984235-A4F1-DF11-8456-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/FCCA7CB6-91F1-DF11-B5F4-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/FC77E073-87F1-DF11-903D-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/FC31FB20-29F1-DF11-8554-0026B93785EC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/FAB223CA-96F1-DF11-A262-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/F6DA8E64-8BF1-DF11-8C9C-0002C90B7426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/F293382A-76F1-DF11-B126-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/F0F7A7C7-2AF1-DF11-A7BE-0026B939DCF3.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/F01B5EB8-91F1-DF11-89C0-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/EC589203-89F1-DF11-92E1-0002C90A36E2.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/E8866428-86F1-DF11-8E96-1CC1DEEB8798.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/E6D74457-73F1-DF11-93BC-842B2B17EA37.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/E4C0EC7B-1FF1-DF11-A409-001EC94BA169.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/E45934C1-8FF1-DF11-9AC3-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/E26C1402-8BF1-DF11-BB1A-0002C90A3492.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DECD28ED-88F1-DF11-98F6-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DCA47526-29F1-DF11-BC8E-0019B9CABE2A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DC6FE33B-76F1-DF11-85A9-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DC1F5FC7-91F1-DF11-8F73-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DA730F72-44F1-DF11-BE2E-0026B937D38F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/DA314767-8BF1-DF11-BF3B-0002C90B7426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D8E03257-3FF1-DF11-978F-0026B93A0356.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D6D2C3DE-36F1-DF11-A383-A4BADB1C5D42.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D63E2958-41F1-DF11-B663-842B2B17E35B.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D2F467AF-8FF1-DF11-8115-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D2DF5DDC-36F1-DF11-85FB-0026B9392629.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D29280BF-3CF1-DF11-B070-842B2B17E8C6.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D04C9821-29F1-DF11-AE9B-842B2B019EE5.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/D010CFD3-8FF1-DF11-88F2-0002C90B39A0.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/CE21F080-71F1-DF11-8840-0002C90A3408.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/CA8FD072-7BF1-DF11-B759-0002C90A3402.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C847FE89-3FF1-DF11-881D-842B2B42B584.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C6D3B9C9-96F1-DF11-AFB1-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C4C9380A-89F1-DF11-9D69-0002C90B39A0.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C2B0006A-95F1-DF11-B151-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C243A1D0-23F1-DF11-9CD4-0026B93AA8FF.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/C2305DF1-7CF1-DF11-8B0F-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/BAE1D8FC-88F1-DF11-B896-0002C90B7F2E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/BA032B27-86F1-DF11-9445-1CC1DEEB8798.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/B8AFB458-30F1-DF11-87AF-00E081237305.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/B48031B8-22F1-DF11-A8A5-842B2B42B536.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/AE5B4629-86F1-DF11-8E28-0002C90B7F5E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/AC9CE5F8-3CF1-DF11-A9F3-842B2B42B2B1.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/A8E34E6B-87F1-DF11-A032-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/A00F3767-75F1-DF11-A5EE-0002C90A3562.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/98F54F69-95F1-DF11-9C75-0002C90B7488.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/9618919C-91F1-DF11-8D5D-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/944A5A3D-93F1-DF11-AC75-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/924B40CC-83F1-DF11-837B-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/8E54CFF3-36F1-DF11-85A7-842B2B1812E7.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/8C1C52DD-36F1-DF11-B96A-842B2B180CC3.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/8845A7CD-94F1-DF11-9669-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/825261BA-32F1-DF11-A5B3-0026B93A0356.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/824328D2-94F1-DF11-B96C-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/7E209BCF-96F1-DF11-A95A-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/7C3E2B52-22F1-DF11-8E47-A4BADB1E763D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/7C2624D6-8FF1-DF11-8EFE-0002C90A3462.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/78DD9071-75F1-DF11-BBA0-0002C90A36FC.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/7252AD61-95F1-DF11-A467-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/720C4FD1-83F1-DF11-82F4-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/6EC0F5C1-3CF1-DF11-B169-A4BADB1E67BA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/6CF8B100-89F1-DF11-95E3-0002C90A36B8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/60E48BBD-85F1-DF11-80C0-0002C90B741E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/5E941A31-76F1-DF11-AF15-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/5CC1127E-1FF1-DF11-BAA4-842B2B17EE7F.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/56D61A14-86F1-DF11-A71B-0002C90B7F5E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/5248729D-96F1-DF11-8054-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/50E8FFE3-83F1-DF11-B2AF-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/4C56F8F7-3AF1-DF11-AC2A-842B2B42BCF8.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/4A8C7C7C-71F1-DF11-91CD-0002C90B741E.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/46193CFC-8AF1-DF11-86EE-0002C90A3408.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/441DA07F-33F1-DF11-86F3-842B2B1811D7.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/44050E3E-29F1-DF11-B8DD-842B2B17F73D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/3CC70D1F-93F1-DF11-864B-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/3AD4AFAC-8FF1-DF11-A2DD-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/3A2FA952-22F1-DF11-A5A7-0026B937D268.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/38654AB8-32F1-DF11-B996-842B2B17367A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/32F1ED0D-8BF1-DF11-897E-0002C90A3492.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/32C2EF83-1FF1-DF11-84BA-0019B9CABE34.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/32AEF16E-87F1-DF11-95CD-0002C90A3698.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/2ECBDB8B-3FF1-DF11-AF7A-A4BADB1E6796.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/2E4D12C2-3CF1-DF11-A8C9-0026B937C6C9.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/2E3F0FFF-88F1-DF11-99A9-0002C90A3414.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/2872C171-87F1-DF11-8462-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/26F0CDB3-8FF1-DF11-A628-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/223727C7-85F1-DF11-BE56-0002C90A3408.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/1E4470C6-2AF1-DF11-B08F-A4BADB1E6602.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/1C068132-93F1-DF11-8D84-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/182317A3-82F1-DF11-8476-0002C90B3974.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/1657743F-93F1-DF11-AFAC-0002C90A36AA.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/161AFE9B-7BF1-DF11-85DA-0002C90A3402.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/14AC342C-93F1-DF11-971E-0002C90A3426.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/1432AAB5-22F1-DF11-9360-0026B937D3B3.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/126F6CB9-3AF1-DF11-9C0B-842B2B42B2B1.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/10427462-95F1-DF11-AAB2-0002C90A3690.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/0C8A206C-91F1-DF11-AAFD-0002C90B743A.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/0AEF6E06-41F1-DF11-87E8-A4BADB1E763D.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/063B0197-3AF1-DF11-9FC8-842B2B17F557.root',
'/store/mc/Fall10/QCD_Pt-15to3000_TuneD6T_Flat_7TeV-pythia6/GEN-SIM-RECO/START38_V12-v1/0022/02E62CA9-91F1-DF11-812A-0002C90B743A.root'
] )
## -------------------- Conditions database (Frontier) access --------------------
# Standard services plus the Frontier global-tag machinery; the global tag pins
# the detector/calibration conditions used with this Fall10 sample.
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'START38_V13::All'

## ------------------------------ Jet corrections --------------------------------
process.load("JetMETCorrections.Configuration.DefaultJEC_cff")

# Pick the AK7 calo-jet correction chain: the data selection (whichfiles == 1)
# also applies the residual correction on top of L2L3; otherwise plain L2L3.
# NOTE(review): `whichfiles` is defined earlier in this configuration file.
correctionsToGet = "ak7CaloL2L3Residual" if whichfiles == 1 else "ak7CaloL2L3"

## -------------------- Dijet analyzer (corrects jets on the fly) ----------------
process.dijetAna = cms.EDAnalyzer(
    "CMSDAS11DijetTestAnalyzer",
    jetSrc = cms.InputTag("ak7CaloJets"),
    vertexSrc = cms.InputTag("offlinePrimaryVertices"),
    jetCorrections = cms.string(correctionsToGet),
    innerDeltaEta = cms.double(0.7),
    outerDeltaEta = cms.double(1.3),
    JESbias = cms.double(1.0),
)

## ------------------------------------ Path -------------------------------------
process.p = cms.Path(process.dijetAna)
| 126.970655
| 157
| 0.755174
| 8,409
| 56,248
| 4.848258
| 0.145677
| 0.06267
| 0.089529
| 0.118914
| 0.730015
| 0.730015
| 0.730015
| 0.730015
| 0.730015
| 0.730015
| 0
| 0.292669
| 0.101995
| 56,248
| 442
| 158
| 127.257919
| 0.514462
| 0.012054
| 0
| 0.004926
| 0
| 0.899015
| 0.862434
| 0.860753
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002463
| 0
| 0.002463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
08c1f6a329f2c09e4f3947e4d6a130b5108ebee9
| 159
|
py
|
Python
|
scryptos/crypto/attack/__init__.py
|
scryptos/scryptoslib
|
bdde5b26dfbf7473b53c22408f97db44821ccbb3
|
[
"MIT"
] | 30
|
2018-10-10T13:48:22.000Z
|
2022-03-14T07:03:57.000Z
|
scryptos/crypto/attack/__init__.py
|
scryptos/scryptoslib
|
bdde5b26dfbf7473b53c22408f97db44821ccbb3
|
[
"MIT"
] | 2
|
2018-10-12T10:05:03.000Z
|
2020-05-18T22:53:15.000Z
|
scryptos/crypto/attack/__init__.py
|
scryptos/scryptoslib
|
bdde5b26dfbf7473b53c22408f97db44821ccbb3
|
[
"MIT"
] | 5
|
2018-10-10T16:11:54.000Z
|
2021-04-04T13:13:53.000Z
|
import scryptos.crypto.attack.rsautil as rsautil
import scryptos.crypto.attack.knapsackutil as knapsackutil
import scryptos.crypto.attack.prngutil as prngutil
| 39.75
| 58
| 0.867925
| 21
| 159
| 6.571429
| 0.380952
| 0.304348
| 0.434783
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 159
| 3
| 59
| 53
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3ee35855c568066d8b02fa2e9b7cee643fcb257b
| 9,910
|
py
|
Python
|
Macroeconomia/Argentina/IndicadoresEmpleoDesocupacion.py
|
alejivo/Macroeconomics
|
82091ab013774fd6d652e4f9874599b07b1ff152
|
[
"MIT"
] | null | null | null |
Macroeconomia/Argentina/IndicadoresEmpleoDesocupacion.py
|
alejivo/Macroeconomics
|
82091ab013774fd6d652e4f9874599b07b1ff152
|
[
"MIT"
] | null | null | null |
Macroeconomia/Argentina/IndicadoresEmpleoDesocupacion.py
|
alejivo/Macroeconomics
|
82091ab013774fd6d652e4f9874599b07b1ff152
|
[
"MIT"
] | null | null | null |
import pandas as pd
import io
import requests
import json
class IndicadoresEmpleoDesocupacion:
def __init__(self):
"""
Los indicadores de empleo y desocupacion se basan en gran medida en la
EPC (encuesta permanente de hogares) en 31 aglomeraciones urbanas.
"""
pass
def getTasaActividad(self, periodo = "Anual"):
    """
    Return the activity rate: PEA (economically active population) / total population.

    Considered an indicator of the labour supply offered by workers.

    Parameters
    ----------
    periodo : str, optional
        Either "Anual" or "Trimestral"; any other value falls back to
        annual. The default is "Anual".

    Returns
    -------
    pd.DataFrame
        Series indexed by date (``indice_tiempo``).

    Raises
    ------
    ValueError
        If the downloaded CSV contains a date that does not match '%Y-%m-%d'.
    """
    # Resolve the CSV download URL from the datos.gob.ar package metadata.
    urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-actividad"
    s = requests.get(urlPackage).content
    objJson = json.loads(s)
    resultado = objJson['result']['resources']
    # Resource 1 is the quarterly series; resource 0 is the annual one.
    selector = 1 if periodo == "Trimestral" else 0
    ultimoResultado = resultado[selector]
    urlDescarga = ultimoResultado['url']
    descripcion = ultimoResultado['description']
    print("Descargando: {}".format(descripcion))
    print("Archivo: {}".format(urlDescarga))
    # Download the CSV and build the DataFrame.
    contenidoCVS = requests.get(urlDescarga).content
    flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
    df_temp = pd.read_csv(flujoCVS)
    # Parse the time column. FIX: the original passed errors='ignore'
    # (deprecated in pandas 2.2), which silently returns the unparsed
    # strings on any malformed date and makes the following .dt.date
    # accessor fail with an obscure AttributeError. Let bad data raise
    # a clear ValueError here instead.
    df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d')
    # Keep plain datetime.date values and use them as the index.
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
def getTasaEmpleo(self, periodo = "Anual"):
    """
    Return the employment rate: employed population / total population.

    Considered a rate representative of the labour demand exercised by firms.

    Parameters
    ----------
    periodo : str, optional
        Either "Anual" or "Trimestral"; any other value falls back to
        annual. The default is "Anual".

    Returns
    -------
    pd.DataFrame
        Series indexed by date (``indice_tiempo``).

    Raises
    ------
    ValueError
        If the downloaded CSV contains a date that does not match '%Y-%m-%d'.
    """
    # Resolve the CSV download URL from the datos.gob.ar package metadata.
    urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-empleo"
    s = requests.get(urlPackage).content
    objJson = json.loads(s)
    resultado = objJson['result']['resources']
    # Resource 1 is the quarterly series; resource 0 is the annual one.
    selector = 1 if periodo == "Trimestral" else 0
    ultimoResultado = resultado[selector]
    urlDescarga = ultimoResultado['url']
    descripcion = ultimoResultado['description']
    print("Descargando: {}".format(descripcion))
    print("Archivo: {}".format(urlDescarga))
    # Download the CSV and build the DataFrame.
    contenidoCVS = requests.get(urlDescarga).content
    flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
    df_temp = pd.read_csv(flujoCVS)
    # Parse the time column. FIX: the original passed errors='ignore'
    # (deprecated in pandas 2.2), which silently returns the unparsed
    # strings on any malformed date and makes the following .dt.date
    # accessor fail with an obscure AttributeError. Let bad data raise
    # a clear ValueError here instead.
    df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d')
    # Keep plain datetime.date values and use them as the index.
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
def getTasaDesocupacion(self, periodo = "Anual"):
    """
    Return the unemployment rate: unemployed population / PEA.

    Parameters
    ----------
    periodo : str, optional
        Either "Anual" or "Trimestral"; any other value falls back to
        annual. The default is "Anual".

    Returns
    -------
    pd.DataFrame
        Series indexed by date (``indice_tiempo``).

    Raises
    ------
    ValueError
        If the downloaded CSV contains a date that does not match '%Y-%m-%d'.
    """
    # Resolve the CSV download URL from the datos.gob.ar package metadata.
    urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-desempleo"
    s = requests.get(urlPackage).content
    objJson = json.loads(s)
    resultado = objJson['result']['resources']
    # Resource 1 is the quarterly series; resource 0 is the annual one.
    selector = 1 if periodo == "Trimestral" else 0
    ultimoResultado = resultado[selector]
    urlDescarga = ultimoResultado['url']
    descripcion = ultimoResultado['description']
    print("Descargando: {}".format(descripcion))
    print("Archivo: {}".format(urlDescarga))
    # Download the CSV and build the DataFrame.
    contenidoCVS = requests.get(urlDescarga).content
    flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
    df_temp = pd.read_csv(flujoCVS)
    # Parse the time column. FIX: the original passed errors='ignore'
    # (deprecated in pandas 2.2), which silently returns the unparsed
    # strings on any malformed date and makes the following .dt.date
    # accessor fail with an obscure AttributeError. Let bad data raise
    # a clear ValueError here instead.
    df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d')
    # Keep plain datetime.date values and use them as the index.
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
def getTasaSubocupacionDemandante(self, periodo="Anual"):
    """Fetch the demanding underemployment rate (underemployed demanding / PEA).

    Downloads the CSV published at datos.gob.ar for the EPH continua
    demanding-underemployment series and returns it indexed by date.

    Parameters
    ----------
    periodo : str, optional
        Either "Anual" or "Trimestral". Any value other than
        "Trimestral" falls back to the annual series. Default "Anual".

    Returns
    -------
    pd.DataFrame
        Series values indexed by 'indice_tiempo' (datetime.date).

    Raises
    ------
    requests.HTTPError
        If the catalogue lookup or the CSV download fails.
    """
    # Resolve the CSV download URL from the data.gob.ar catalogue.
    urlPackage = ("https://datos.gob.ar/api/3/action/package_show"
                  "?id=sspm-principales-variables-ocupacionales-eph-continua-subocupacion-demandante")
    respuesta = requests.get(urlPackage)
    respuesta.raise_for_status()  # fail fast with a clear HTTP error
    recursos = json.loads(respuesta.content)['result']['resources']
    # Resource 0 is the annual series, resource 1 the quarterly one.
    recurso = recursos[1 if periodo == "Trimestral" else 0]
    print("Descargando: {}".format(recurso['description']))
    print("Archivo: {}".format(recurso['url']))
    # Download the CSV and build the dataframe.
    descarga = requests.get(recurso['url'])
    descarga.raise_for_status()
    df_temp = pd.read_csv(io.StringIO(descarga.content.decode('utf-8')))
    # Parse dates strictly: a malformed value raises here instead of
    # failing later with a confusing AttributeError (the deprecated
    # errors='ignore' used to leave the column as plain strings).
    df_temp['indice_tiempo'] = pd.to_datetime(
        df_temp['indice_tiempo'], format='%Y-%m-%d')
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
def getTasaSubocupacionNoDemandante(self, periodo="Anual"):
    """Fetch the non-demanding underemployment rate (underemployed NOT demanding / PEA).

    Downloads the CSV published at datos.gob.ar for the EPH continua
    non-demanding-underemployment series and returns it indexed by date.

    Parameters
    ----------
    periodo : str, optional
        Either "Anual" or "Trimestral". Any value other than
        "Trimestral" falls back to the annual series. Default "Anual".

    Returns
    -------
    pd.DataFrame
        Series values indexed by 'indice_tiempo' (datetime.date).

    Raises
    ------
    requests.HTTPError
        If the catalogue lookup or the CSV download fails.
    """
    # Resolve the CSV download URL from the data.gob.ar catalogue.
    urlPackage = ("https://datos.gob.ar/api/3/action/package_show"
                  "?id=sspm-principales-variables-ocupacionales-eph-continua-subocupacion-no-demandante")
    respuesta = requests.get(urlPackage)
    respuesta.raise_for_status()  # fail fast with a clear HTTP error
    recursos = json.loads(respuesta.content)['result']['resources']
    # Resource 0 is the annual series, resource 1 the quarterly one.
    recurso = recursos[1 if periodo == "Trimestral" else 0]
    print("Descargando: {}".format(recurso['description']))
    print("Archivo: {}".format(recurso['url']))
    # Download the CSV and build the dataframe.
    descarga = requests.get(recurso['url'])
    descarga.raise_for_status()
    df_temp = pd.read_csv(io.StringIO(descarga.content.decode('utf-8')))
    # Parse dates strictly: a malformed value raises here instead of
    # failing later with a confusing AttributeError (the deprecated
    # errors='ignore' used to leave the column as plain strings).
    df_temp['indice_tiempo'] = pd.to_datetime(
        df_temp['indice_tiempo'], format='%Y-%m-%d')
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
def getIndiceSalariosBase2016(self):
    """Fetch the wage index (base October 2016).

    Downloads the CSV published at datos.gob.ar for the index that
    estimates the evolution of wages in the economy, base October 2016,
    and returns it indexed by date.

    Returns
    -------
    pd.DataFrame
        Series values indexed by 'indice_tiempo' (datetime.date).

    Raises
    ------
    requests.HTTPError
        If the catalogue lookup or the CSV download fails.
    """
    # Resolve the CSV download URL from the data.gob.ar catalogue.
    urlPackage = ("https://datos.gob.ar/api/3/action/package_show"
                  "?id=sspm-indice-salarios-base-octubre-2016")
    respuesta = requests.get(urlPackage)
    respuesta.raise_for_status()  # fail fast with a clear HTTP error
    recursos = json.loads(respuesta.content)['result']['resources']
    # This dataset exposes a single relevant resource (index 0).
    recurso = recursos[0]
    print("Descargando: {}".format(recurso['description']))
    print("Archivo: {}".format(recurso['url']))
    # Download the CSV and build the dataframe.
    descarga = requests.get(recurso['url'])
    descarga.raise_for_status()
    df_temp = pd.read_csv(io.StringIO(descarga.content.decode('utf-8')))
    # Parse dates strictly: a malformed value raises here instead of
    # failing later with a confusing AttributeError (the deprecated
    # errors='ignore' used to leave the column as plain strings).
    df_temp['indice_tiempo'] = pd.to_datetime(
        df_temp['indice_tiempo'], format='%Y-%m-%d')
    df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
    df_temp.set_index('indice_tiempo', inplace=True)
    return df_temp
| 38.410853
| 151
| 0.616549
| 1,072
| 9,910
| 5.606343
| 0.153918
| 0.04193
| 0.04792
| 0.07188
| 0.881697
| 0.878869
| 0.870882
| 0.870882
| 0.846922
| 0.846922
| 0
| 0.005109
| 0.269223
| 9,910
| 258
| 152
| 38.410853
| 0.824772
| 0.24551
| 0
| 0.826087
| 0
| 0.052174
| 0.231374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06087
| false
| 0.008696
| 0.034783
| 0
| 0.156522
| 0.104348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
412d713717f58c09a4e40c6b4b29d00aa0b36ffc
| 430,978
|
py
|
Python
|
run_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
run_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
run_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
""" Tests for running the engine """
# Import code to be tested
import run
# Import needed packages
import unittest
import numpy
import copy
import itertools
import os
class CommonTestMeasures(object):
    """Class of common sample measures for tests.

    Attributes:
        sample_measure (dict): Sample residential measure #1 (HVAC).
        sample_measure2 (dict): Sample residential measure #2 (HVAC with
            secondary lighting effects).
        sample_measure3 (dict): Sample commercial measure #1 (HVAC).
        sample_measure4 (dict): Sample residential measure #3 (lighting).
        sample_measure5 (dict): Sample commercial measure #2 (lighting).
    """

    @staticmethod
    def _blank_markets():
        """Return a fresh, fully independent 'markets' scaffold."""
        def scenario():
            # Each call builds brand-new dict objects so no two measures
            # (and no two adoption scenarios) share mutable state.
            return {
                "master_mseg": {},
                "mseg_adjust": {
                    "contributing mseg keys and values": {},
                    "competed choice parameters": {},
                    "secondary mseg adjustments": {
                        "market share": {
                            "original energy (total captured)": {},
                            "original energy (competed and captured)": {},
                            "adjusted energy (total captured)": {},
                            "adjusted energy (competed and captured)": {}}}},
                "mseg_out_break": {}}
        return {"Technical potential": scenario(),
                "Max adoption potential": scenario()}

    def _measure(self, name, bldg_type, fuel_type, end_use,
                 technology_type, technology):
        """Assemble one sample measure dict around the shared fields."""
        return {
            "name": name,
            "active": 1,
            "market_entry_year": None,
            "market_exit_year": None,
            "market_scaling_fractions": None,
            "market_scaling_fractions_source": None,
            "measure_type": "full service",
            "structure_type": ["new", "existing"],
            "climate_zone": ["AIA_CZ1", "AIA_CZ2"],
            "bldg_type": bldg_type,
            "fuel_type": fuel_type,
            "fuel_switch_to": None,
            "end_use": end_use,
            "technology_type": technology_type,
            "technology": technology,
            "markets": self._blank_markets()}

    def __init__(self):
        self.sample_measure = self._measure(
            "sample measure 1", ["single family home"],
            {"primary": ["electricity (grid)"], "secondary": None},
            {"primary": ["heating", "cooling"], "secondary": None},
            {"primary": "supply", "secondary": None},
            {"primary": ["resistance heat", "ASHP", "GSHP", "room AC"],
             "secondary": None})
        self.sample_measure2 = self._measure(
            "sample measure 2", ["single family home"],
            {"primary": ["electricity (grid)"],
             "secondary": ["electricity (grid)"]},
            {"primary": ["heating", "cooling"], "secondary": ["lighting"]},
            {"primary": "supply", "secondary": "supply"},
            {"primary": ["resistance heat", "ASHP", "GSHP", "room AC"],
             "secondary": ["general service (LED)"]})
        self.sample_measure3 = self._measure(
            "sample measure 3 (commercial)", ["assembly"],
            {"primary": ["electricity"], "secondary": None},
            {"primary": ["heating", "cooling"], "secondary": None},
            {"primary": "supply", "secondary": None},
            {"primary": ["resistance heat", "ASHP", "GSHP", "room AC"],
             "secondary": None})
        self.sample_measure4 = self._measure(
            "sample measure 4", ["single family home"],
            {"primary": ["electricity (grid)"], "secondary": None},
            {"primary": ["lighting"], "secondary": None},
            {"primary": "supply", "secondary": None},
            {"primary": ["general service (CFL)"], "secondary": None})
        self.sample_measure5 = self._measure(
            "sample measure 5 (commercial)", ["assembly"],
            {"primary": ["electricity"], "secondary": None},
            {"primary": ["lighting"], "secondary": None},
            {"primary": "supply", "secondary": None},
            {"primary": ["F32T8"], "secondary": None})
class CommonMethods(object):
    """Define common methods for use in all tests below."""

    def dict_check(self, dict1, dict2):
        """Recursively assert that two (possibly nested) dicts are equal.

        Args:
            dict1 (dict): First dictionary to be compared
            dict2 (dict): Second dictionary to be compared

        Raises:
            AssertionError: If dictionaries are not equal.
        """
        # When the two dicts differ in size, zip_longest pads the
        # shorter side with this sentinel; it is shaped as a (key,
        # value) tuple so unpacking stays uniform and the key equality
        # check below fails loudly on the padded entry.
        sentinel = ('substituted entry', 5.2)
        pairs = itertools.zip_longest(
            sorted(dict1.items()), sorted(dict2.items()),
            fillvalue=sentinel)
        for (key_a, val_a), (key_b, val_b) in pairs:
            # Keys must agree at every level; this trips if either dict
            # is empty, is missing sections, or uses different names.
            self.assertEqual(key_a, key_b)
            if isinstance(val_a, dict):
                # Not yet at a leaf: check membership, then recurse.
                self.assertCountEqual(val_a, val_b)
                self.dict_check(val_a, val_b)
            elif isinstance(val_a, numpy.ndarray):
                # Leaf formatted as a numpy array (input uncertainty
                # test cases): compare element-wise to 2 decimal places.
                self.assertTrue(type(val_a) == type(val_b))
                for idx in range(0, len(val_a)):
                    self.assertAlmostEqual(
                        val_a[idx], val_b[idx], places=2)
            else:
                # Leaf formatted as a point value.
                self.assertAlmostEqual(val_a, val_b, places=2)
class TestMeasureInit(unittest.TestCase):
    """Ensure that measure attributes are correctly initiated.

    Attributes:
        sample_measure (object): Residential sample measure object.
        attribute_dict (dict): Dict of sample measure attributes.
    """

    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        cwd = os.getcwd()
        useful_vars = run.UsefulVars(cwd, run.UsefulInputFiles())
        cls.sample_measure = CommonTestMeasures().sample_measure
        cls.attribute_dict = run.Measure(
            useful_vars, **cls.sample_measure).__dict__

    def test_attributes(self):
        """Compare object attributes to keys from input dict."""
        for attr, expected in self.sample_measure.items():
            self.assertEqual(self.attribute_dict[attr], expected)
class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods):
    """Test operation of 'out_break_walk' function.

    Verify that function properly applies a climate zone/building
    type/end use partition to a total energy or carbon
    market/savings value.

    Attributes:
        a_run (object): Sample analysis engine object.
        ok_total (dict): Sample unpartitioned measure results data.
        ok_partitions (dict): Sample results partitioning fraction.
        ok_out (dict): Sample partitioned measure results data.
    """

    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        cwd = os.getcwd()
        useful_vars = run.UsefulVars(cwd, run.UsefulInputFiles())
        measure_spec = CommonTestMeasures().sample_measure
        cls.a_run = run.Engine(
            useful_vars, [run.Measure(useful_vars, **measure_spec)])
        cls.ok_total = {"2009": 100, "2010": 100}
        # Partitioning fractions by climate zone / building type / end use.
        cls.ok_partitions = {
            "AIA CZ1": {
                "Residential": {
                    "Heating": {"2009": 0.10, "2010": 0.10},
                    "Cooling": {"2009": 0.15, "2010": 0.15}},
                "Commercial": {
                    "Heating": {"2009": 0.20, "2010": 0.20},
                    "Cooling": {"2009": 0.25, "2010": 0.25}}},
            "AIA CZ2": {
                "Residential": {
                    "Heating": {"2009": 0.30, "2010": 0.30},
                    "Cooling": {"2009": 0.35, "2010": 0.35}},
                "Commercial": {
                    "Heating": {"2009": 0.40, "2010": 0.40},
                    "Cooling": {"2009": 0.45, "2010": 0.45}}}}
        # Expected result: each leaf is total * its partition fraction.
        cls.ok_out = {
            "AIA CZ1": {
                "Residential": {
                    "Heating": {"2009": 10, "2010": 10},
                    "Cooling": {"2009": 15, "2010": 15}},
                "Commercial": {
                    "Heating": {"2009": 20, "2010": 20},
                    "Cooling": {"2009": 25, "2010": 25}}},
            "AIA CZ2": {
                "Residential": {
                    "Heating": {"2009": 30, "2010": 30},
                    "Cooling": {"2009": 35, "2010": 35}},
                "Commercial": {
                    "Heating": {"2009": 40, "2010": 40},
                    "Cooling": {"2009": 45, "2010": 45}}}}

    def test_ok(self):
        """Test for correct function output given valid inputs."""
        self.dict_check(
            self.a_run.out_break_walk(self.ok_partitions, self.ok_total),
            self.ok_out)
class PrioritizationMetricsTest(unittest.TestCase, CommonMethods):
"""Test the operation of the 'calc_savings_metrics' function.
Verify that measure master microsegment inputs yield expected savings
and financial metrics outputs.
Attributes:
handyvars (object): Useful variables across the class.
sample_measure_res (object): Sample residential measure data.
sample_measure_com (object): Sample commercial measure data.
test_adopt_scheme (string): Sample consumer adoption scheme.
ok_rate (float): Sample discount rate.
ok_master_mseg_point (dict): Sample measure master microsegment
including all point values at terminal leaf nodes.
ok_master_mseg_dist1 (dict): Sample measure master microsegment
including energy, carbon, and energy/carbon cost arrays.
ok_master_mseg_dist2 (dict): Sample measure master microsegment
including stock cost array.
ok_master_mseg_dist3 (dict): Sample measure master microsegment
including measure lifetime array.
ok_master_mseg_dist4 (dict): Sample measure master microsegment
including stock cost and measure lifetime array.
ok_out_point_res (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_point' with a residential sample
measure.
ok_out_point_com (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_point' with a residential sample
measure.
ok_out_dist1 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist1' with a residential sample
measure.
ok_out_dist2 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist2' with a residential sample
measure.
ok_out_dist3 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist3' with a residential sample
measure.
ok_out_dist4 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist4' with a residential sample
measure.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
# Reset aeo_years
cls.handyvars.aeo_years = ["2009", "2010"]
cls.sample_measure_res = CommonTestMeasures().sample_measure4
cls.sample_measure_com = CommonTestMeasures().sample_measure5
cls.test_adopt_scheme = 'Max adoption potential'
cls.ok_rate = 0.07
cls.ok_master_mseg_point = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist1 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {
"2009": numpy.array([16, 27, 31, 6, 51]),
"2010": numpy.array([106, 95, 81, 11, 124])}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {
"2009": numpy.array([6, 7, 1, 16, 1]),
"2010": numpy.array([36, 45, 61, 5, 54])}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {
"2009": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),
"2010": numpy.array(
[100.6, 108.7, 105.1, 105, 106.1])}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {
"2009": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),
"2010": numpy.array(
[100.6, 108.7, 105.1, 105, 106.1])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {
"2009": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {
"2009": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {
"2009": numpy.array(
[25.1, 24.7, 23.7, 31.2, 18.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {
"2009": numpy.array(
[25.1, 24.7, 23.7, 31.2, 18.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist2 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist3 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}}
cls.ok_master_mseg_dist4 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}}
cls.ok_out_point_res = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {"2009": -0.01602415, "2010": -0.01111353},
"cce (w/ carbon cost benefits)": {
"2009": -0.04935749, "2010": -0.08611353},
"ccc": {"2009": -1.602415e-08, "2010": -1.111353e-08},
"ccc (w/ energy cost benefits)": {
"2009": -8.269082e-08, "2010": -8.611353e-08}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 0.4345794),
"2010": numpy.pmt(0.07, 2, 0.2009346)},
"commercial": {"2009": None, "2010": None}},
"energy cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 1.808018),
"2010": numpy.pmt(0.07, 2, 1.356014)},
"commercial": {"2009": None, "2010": None}},
"carbon cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 0.9040091),
"2010": numpy.pmt(0.07, 2, 1.356014)},
"commercial": {"2009": None, "2010": None}}},
"irr (w/ energy costs)": {
"2009": 3.45, "2010": 2.44},
"irr (w/ energy and carbon costs)": {
"2009": 4.54, "2010": 4.09},
"payback (w/ energy costs)": {
"2009": 0.25, "2010": 0.33},
"payback (w/ energy and carbon costs)": {
"2009": 0.2, "2010": 0.22}}]
cls.ok_out_point_com = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {"2009": -0.01602415, "2010": -0.01111353},
"cce (w/ carbon cost benefits)": {
"2009": -0.04935749, "2010": -0.08611353},
"ccc": {"2009": -1.602415e-08, "2010": -1.111353e-08},
"ccc (w/ energy cost benefits)": {
"2009": -8.269082e-08, "2010": -8.611353e-08}},
{
"anpv": {
"stock cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, -0.4090909),
"rate 2": numpy.pmt(1.0, 2, 0),
"rate 3": numpy.pmt(0.45, 2, 0.1896552),
"rate 4": numpy.pmt(0.25, 2, 0.3),
"rate 5": numpy.pmt(0.15, 2, 0.3695652),
"rate 6": numpy.pmt(0.065, 2, 0.4389671),
"rate 7": -0.25},
"2010": {
"rate 1": numpy.pmt(10.0, 2, -0.4318182),
"rate 2": numpy.pmt(1.0, 2, -0.125),
"rate 3": numpy.pmt(0.45, 2, 0.01724138),
"rate 4": numpy.pmt(0.25, 2, 0.1),
"rate 5": numpy.pmt(0.15, 2, 0.1521739),
"rate 6": numpy.pmt(0.065, 2, 0.2042254),
"rate 7": -0.125}}},
"energy cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, 0.09917355),
"rate 2": numpy.pmt(1.0, 2, 0.75),
"rate 3": numpy.pmt(0.45, 2, 1.165279),
"rate 4": numpy.pmt(0.25, 2, 1.44),
"rate 5": numpy.pmt(0.15, 2, 1.625709),
"rate 6": numpy.pmt(0.065, 2, 1.820626),
"rate 7": -1},
"2010": {
"rate 1": numpy.pmt(10.0, 2, 0.07438017),
"rate 2": numpy.pmt(1.0, 2, 0.5625),
"rate 3": numpy.pmt(0.45, 2, 0.8739596),
"rate 4": numpy.pmt(0.25, 2, 1.08),
"rate 5": numpy.pmt(0.15, 2, 1.219282),
"rate 6": numpy.pmt(0.065, 2, 1.36547),
"rate 7": -0.75}}},
"carbon cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, 0.04958678),
"rate 2": numpy.pmt(1.0, 2, 0.375),
"rate 3": numpy.pmt(0.45, 2, 0.5826397),
"rate 4": numpy.pmt(0.25, 2, 0.72),
"rate 5": numpy.pmt(0.15, 2, 0.8128544),
"rate 6": numpy.pmt(0.065, 2, 0.9103132),
"rate 7": -0.5},
"2010": {
"rate 1": numpy.pmt(10.0, 2, 0.07438017),
"rate 2": numpy.pmt(1.0, 2, 0.5625),
"rate 3": numpy.pmt(0.45, 2, 0.8739596),
"rate 4": numpy.pmt(0.25, 2, 1.08),
"rate 5": numpy.pmt(0.15, 2, 1.219282),
"rate 6": numpy.pmt(0.065, 2, 1.36547),
"rate 7": -0.75}}}},
"irr (w/ energy costs)": {
"2009": 3.45, "2010": 2.44},
"irr (w/ energy and carbon costs)": {
"2009": 4.54, "2010": 4.09},
"payback (w/ energy costs)": {
"2009": 0.25, "2010": 0.33},
"payback (w/ energy and carbon costs)": {
"2009": 0.2, "2010": 0.22}}]
cls.ok_out_dist1 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {
"2009": numpy.array([184, 173, 169, 194, 149]),
"2010": numpy.array([194, 205, 219, 289, 176])},
"savings (annual)": {
"2009": numpy.array([94, 93, 99, 84, 99]),
"2010": numpy.array([114, 105, 89, 145, 96])},
"cost savings (total)": {
"2009": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),
"2010": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])},
"cost savings (annual)": {
"2009": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),
"2010": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}},
"carbon": {
"savings (total)": {
"2009": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]),
"2010": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])},
"savings (annual)": {
"2009": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]),
"2010": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])},
"cost savings (total)": {
"2009": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),
"2010": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])},
"cost savings (annual)": {
"2009": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),
"2010": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}},
{
"cce": {
"2009": numpy.array([
-0.01306317, -0.01389378, -0.01422262,
-0.01238981, -0.01613170]),
"2010": numpy.array([
-0.01145724, -0.01084246, -0.01014934,
-0.007691022, -0.01262901])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
-0.0396936, -0.04452961, -0.05150073,
-0.006204243, -0.09331291]),
"2010": numpy.array([
-0.1140346, -0.11474490, -0.09371098,
-0.072742925, -0.11206083])},
"ccc": {
"2009": numpy.array([
-1.608851e-08, -1.689124e-08, -1.693885e-08,
-1.602415e-08, -1.614253e-08]),
"2010": numpy.array([
-1.114697e-08, -1.161895e-08, -1.140434e-08,
-1.139849e-08, -1.146315e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-8.904701e-08, -9.630094e-08, -1.036196e-07,
-7.469082e-08, -6.651191e-08]),
"2010": numpy.array([
-8.587114e-08, -9.682543e-08, -7.964446e-08,
-8.216772e-08, -7.592937e-08])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 1.97074),
numpy.pmt(0.07, 2, 2.043061),
numpy.pmt(0.07, 2, 2.223862),
numpy.pmt(0.07, 2, 1.591056),
numpy.pmt(0.07, 2, 1.356014)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.346974),
numpy.pmt(0.07, 2, 1.473535),
numpy.pmt(0.07, 2, 1.202332),
numpy.pmt(0.07, 2, 1.247533),
numpy.pmt(0.07, 2, 1.130011)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.8859289),
numpy.pmt(0.07, 2, 0.9582496),
numpy.pmt(0.07, 2, 1.139051),
numpy.pmt(0.07, 2, -0.2169622),
numpy.pmt(0.07, 2, 2.079221)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.798978),
numpy.pmt(0.07, 2, 1.925539),
numpy.pmt(0.07, 2, 1.654337),
numpy.pmt(0.07, 2, 1.699537),
numpy.pmt(0.07, 2, 1.582016)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}}},
"irr (w/ energy costs)": {
"2009": numpy.array([
3.648926, 3.737086, 3.956335, 3.180956, 2.886001]),
"2010": numpy.array([
2.425032, 2.584709, 2.240438, 2.298386, 2.147181])},
"irr (w/ energy and carbon costs)": {
"2009": numpy.array([
4.713113, 4.884221, 5.309580, 2.908860, 5.394281]),
"2010": numpy.array([
4.601286, 4.897553, 4.260683, 4.367373, 4.089454])},
"payback (w/ energy costs)": {
"2009": numpy.array([
0.2392344, 0.2347418, 0.2242152, 0.2659574,
0.2857143]),
"2010": numpy.array([
0.3344482, 0.3194888, 0.3533569, 0.3472222,
0.3636364])},
"payback (w/ energy and carbon costs)": {
"2009": numpy.array([
0.1937984, 0.1879699, 0.1748252, 0.2840909,
0.1724138]),
"2010": numpy.array([
0.2008032, 0.1901141, 0.2145923, 0.2100840,
0.2222222])}}]
cls.ok_out_dist2 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])},
"cost savings (annual)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
-0.01565543, -0.02450490, -0.01934271, -0.01897398,
-0.01418052]),
"2010": numpy.array([
-0.02466428, -0.02853592, -0.02023954, -0.02715319,
-0.02355809])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
-0.04898876, -0.05783823, -0.05267604,
-0.05230731, -0.04751385]),
"2010": numpy.array([
-0.09966428, -0.10353592, -0.09523954, -0.10215319,
-0.09855809])},
"ccc": {
"2009": numpy.array([
-1.565543e-08, -2.450490e-08, -1.934271e-08,
-1.897398e-08, -1.418052e-08]),
"2010": numpy.array([
-2.466428e-08, -2.853592e-08, -2.023954e-08,
-2.715319e-08, -2.355809e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-8.232209e-08, -9.117156e-08, -8.600937e-08,
-8.564064e-08, -8.084718e-08]),
"2010": numpy.array([
-9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07,
-9.855809e-08])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.4245794),
numpy.pmt(0.07, 2, 0.6645794),
numpy.pmt(0.07, 2, 0.5245794),
numpy.pmt(0.07, 2, 0.5145794),
numpy.pmt(0.07, 2, 0.3845794)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 0.4459346),
numpy.pmt(0.07, 2, 0.5159346),
numpy.pmt(0.07, 2, 0.3659346),
numpy.pmt(0.07, 2, 0.4909346),
numpy.pmt(0.07, 2, 0.4259346)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([
3.370236, 6.877566, 4.335205, 4.218185, 3.081800]),
"2010": numpy.array([
5.345834, 7.580577, 3.931585, 6.612039, 4.915578])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([
4.442382, 8.824726, 5.647891, 5.501689, 4.082098]),
"2010": numpy.array([
8.446248, 11.795815, 6.327488, 10.343948, 7.801544])},
"payback (w/ energy costs)":
{"2009": numpy.array([
0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]),
"2010": numpy.array([
0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([
0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]),
"2010": numpy.array([
0.1133333, 0.08222222, 0.1488889, 0.09333333,
0.1222222])}}]
cls.ok_out_dist3 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
0.03566667, 0.03566667, -0.01602415,
-0.01602415, -0.04694426]),
"2010": numpy.array([
0.05350000, 0.05350000, -0.01111353,
-0.01111353, -0.04976366])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
0.002333333, 0.002333333, -0.04935749,
-0.04935749, -0.0802776]),
"2010": numpy.array([
-0.021500000, -0.021500000, -0.08611353,
-0.08611353, -0.1247637])},
"ccc": {
"2009": numpy.array([
3.566667e-08, 3.566667e-08, -1.602415e-08,
-1.602415e-08, -4.694426e-08]),
"2010": numpy.array([
5.350000e-08, 5.350000e-08, -1.111353e-08,
-1.111353e-08, -4.976366e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-3.10e-08, -3.10e-08, -8.269082e-08,
-8.269082e-08, -1.136109e-07]),
"2010": numpy.array([
-2.15e-08, -2.15e-08, -8.611353e-08,
-8.611353e-08, -1.247637e-07])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 5, 2.887211)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 5, 2.040408)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 5, 4.100197)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 5, 2.050099)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]),
"2010": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]),
"2010": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])},
"payback (w/ energy costs)":
{"2009": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]),
"2010": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]),
"2010": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}]
cls.ok_out_dist4 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])},
"cost savings (annual)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
0.036380, 0.019260, -0.01934271,
-0.01897398, -0.04613129]),
"2010": numpy.array([
0.027285, 0.019795, -0.02023954,
-0.02715319, -0.05525120])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
0.003046667, -0.01407333, -0.05267604,
-0.05230731, -0.07946463]),
"2010": numpy.array([
-0.047715000, -0.05520500, -0.09523954,
-0.10215319, -0.13025120])},
"ccc": {
"2009": numpy.array([
3.6380e-08, 1.9260e-08, -1.934271e-08,
-1.897398e-08, -4.613129e-08]),
"2010": numpy.array([
2.7285e-08, 1.9795e-08, -2.023954e-08,
-2.715319e-08, -5.525120e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-3.028667e-08, -4.740667e-08, -8.600937e-08,
-8.564064e-08, -1.127980e-07]),
"2010": numpy.array([
-4.771500e-08, -5.520500e-08, -9.523954e-08,
-1.021532e-07, -1.302512e-07])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, -0.51),
numpy.pmt(0.07, 1, -0.27),
numpy.pmt(0.07, 2, 0.5245794),
numpy.pmt(0.07, 2, 0.5145794),
numpy.pmt(0.07, 5, 2.837211)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, -0.255),
numpy.pmt(0.07, 1, -0.185),
numpy.pmt(0.07, 2, 0.3659346),
numpy.pmt(0.07, 2, 0.4909346),
numpy.pmt(0.07, 5, 2.265408)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 5, 4.100197)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 5, 2.050099)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([
0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]),
"2010": numpy.array([
1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([
1.941176, 4.555556, 5.647891, 5.501689, 4.543007]),
"2010": numpy.array([
4.882353, 7.108108, 6.327488, 10.343948, 8.181351])},
"payback (w/ energy costs)":
{"2009": numpy.array([
0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]),
"2010": numpy.array([
0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([
0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]),
"2010": numpy.array([
0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}]
cls.ok_savings_mkts_comp_schemes = ["competed", "uncompeted"]
def test_metrics_ok_point_res(self):
"""Test output given residential measure with point value inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_point'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_point
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# For first test case, verify correct adoption/competition scenario
# keys for measure markets/savings/portfolio metrics
for adopt_scheme in self.handyvars.adopt_schemes:
# Markets
self.assertEqual(list(sorted(
engine_instance.measures[0].markets[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Savings
self.assertEqual(list(sorted(
engine_instance.measures[0].savings[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Portfolio metrics
self.assertEqual(list(sorted(engine_instance.measures[
0].portfolio_metrics[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_point_res[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_res[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_res[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_point_res[3])
def test_metrics_ok_point_com(self):
"""Test output given commercial measure with point value inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_point'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_com)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_point
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_point_com[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_com[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_com[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_point_com[3])
def test_metrics_ok_distrib1(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist1'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist1
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist1[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist1[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist1[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist1[3])
def test_metrics_ok_distrib2(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist2'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist2
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist2[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist2[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist2[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist2[3])
def test_metrics_ok_distrib3(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist3'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist3
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist3[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist3[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist3[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist3[3])
def test_metrics_ok_distrib4(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist4'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist4
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist4[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist4[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist4[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist4[3])
class MetricUpdateTest(unittest.TestCase, CommonMethods):
    """Test the operation of the 'metric_update' function.

    Verify that cashflow inputs generate expected prioritization metric
    outputs (annuity equivalent NPVs, IRR, payback, and cost of conserved
    energy/carbon).

    Attributes:
        handyvars (object): Useful variables across the class.
        measure_list (list): List for Engine including one sample
            residential measure.
        ok_base_life (int): Sample baseline technology lifetime.
        ok_product_lifetime (float): Sample measure lifetime.
        ok_life_ratio (int): Sample measure->baseline lifetime ratio.
        ok_base_scost (int): Sample baseline stock cost.
        ok_meas_sdelt (int): Sample baseline->measure stock cost delta.
        ok_esave (float): Sample measure energy savings.
        ok_ecostsave (float): Sample measure energy cost savings.
        ok_csave (int): Sample measure avoided carbon emissions.
        ok_ccostsave (int): Sample measure avoided carbon costs.
        ok_out_array (list): Financial metric values (annuity equivalent
            NPVs, IRR, payback, cost of conserved energy/carbon) that
            should be generated given valid sample inputs; None entries
            mark positions checked for exact equality rather than
            approximate equality.
    """
    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        base_dir = os.getcwd()
        cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
        sample_measure = CommonTestMeasures().sample_measure4
        cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)]
        cls.ok_base_life = 3
        cls.ok_product_lifetime = 6.2
        cls.ok_life_ratio = 2
        cls.ok_base_scost = 1
        cls.ok_meas_sdelt = -1
        cls.ok_esave = 7.5
        cls.ok_ecostsave = 0.5
        cls.ok_csave = 50
        cls.ok_ccostsave = 1
        # Expected outputs: first three entries are residential annuity
        # equivalent NPVs; the three None entries correspond to metrics
        # expected to be undefined for this sample measure
        cls.ok_out_array = [
            numpy.pmt(0.07, 6, -0.1837021),
            numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654),
            None, None, None, 0.62, 1.59, 2, 0.67, 0.005,
            -0.13, 7.7e-10, -9.2e-9]
    def test_metric_updates(self):
        """Test for correct outputs given valid inputs."""
        # Create an Engine instance using sample_measure list
        engine_instance = run.Engine(self.handyvars, self.measure_list)
        # Record the output for the test run of the 'metric_update'
        # function; note that the measure lifetime is truncated to int
        function_output = engine_instance.metric_update(
            self.measure_list[0], self.ok_base_life,
            int(self.ok_product_lifetime), self.ok_base_scost,
            self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave,
            self.ok_csave, self.ok_ccostsave)
        # Test that valid inputs yield correct anpv, irr, payback, and
        # cost of conserved energy/carbon outputs; None entries must
        # match exactly, numeric entries to 2 decimal places
        for ind, x in enumerate(self.ok_out_array):
            if x is not None:
                self.assertAlmostEqual(function_output[ind], x, places=2)
            else:
                self.assertEqual(function_output[ind], x)
class PaybackTest(unittest.TestCase):
    """Test the operation of the 'payback' function.

    Verify cashflow input generates expected payback output.

    Attributes:
        handyvars (object): Useful variables across the class.
        measure_list (list): List for Engine including one sample
            residential measure.
        ok_cashflows (list): Set of sample input cash flows.
        ok_out (list): Outputs that should be generated for each
            set of sample cash flows.
    """
    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        cwd = os.getcwd()
        cls.handyvars = run.UsefulVars(cwd, run.UsefulInputFiles())
        meas_spec = CommonTestMeasures().sample_measure
        cls.measure_list = [run.Measure(cls.handyvars, **meas_spec)]
        # Sample cash flow series paired element-wise with the expected
        # payback values in 'ok_out'
        cls.ok_cashflows = [
            [-10, 1, 1, 1, 1, 5, 7, 8],
            [-10, 14, 2, 3, 4],
            [-10, 0, 1, 2],
            [10, 4, 7, 8, 10],
            [-100, 0, 1]]
        cls.ok_out = [5.14, 0.71, 6.5, 0, 999]
    def test_cashflow_paybacks(self):
        """Test for correct outputs given valid inputs."""
        # Create an Engine instance using sample_measure list
        eng = run.Engine(self.handyvars, self.measure_list)
        # Each sample cash flow series should yield its expected payback
        for cashflow, expected in zip(self.ok_cashflows, self.ok_out):
            self.assertAlmostEqual(
                eng.payback(cashflow), expected, places=2)
class ResCompeteTest(unittest.TestCase, CommonMethods):
"""Test 'compete_res_primary,' and 'htcl_adj'.
Verify that 'compete_res_primary' correctly calculates primary market
shares and updates master microsegments for a series of competing
residential measures; and that 'htcl_adj' properly accounts for
heating and cooling supply-demand overlaps.
Attributes:
handyvars (object): Useful variables across the class.
test_adopt_scheme (string): Sample consumer adoption scheme.
test_htcl_adj (dict): Sample dict with supply-demand overlap data.
adjust_key1 (string): First sample string for competed demand-side and
supply-side market microsegment key chain being tested.
adjust_key2 (string): Second sample string for competed demand-side and
supply-side market microsegment key chain being tested.
compete_meas1 (dict): Sample residential demand-side cooling measure 1.
compete_meas1_dist (dict): Alternative version of sample residential
demand-side cooling measure 1 including lists of energy/carbon and
associated cost input values instead of point values.
compete_meas2 (dict): Sample residential demand-side cooling measure 2.
compete_meas3 (dict): Sample residential supply-side cooling measure 1.
compete_meas3_dist (dict): Alternative version of sample residential
supply-side cooling measure 1 including lists of stock cost input
values instead of point values.
compete_meas4 (dict): Sample residential supply-side cooling measure 2.
compete_meas5 (dict): Sample residential supply-side cooling measure 3.
measures_all (list): List of all competing/interacting sample Measure
objects with point value inputs.
measures_demand (list): Demand-side subset of 'measures_all'.
measures_supply (list): Supply-side subset of 'measures_all'.
measures_overlap1 (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand' Measure objects.
measures_overlap2 (dict): List of demand-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_supply' Measure objects.
a_run (object): Analysis engine object incorporating all
'measures_all' objects.
measures_all_dist (list): List including competing/interacting sample
Measure objects with array inputs.
measures_demand_dist (list): Demand-side subset of 'measures_all_dist'.
measures_supply_dist (list): Supply-side subset of 'measures_all_dist'.
measures_overlap1_dist (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand_dist' Measure objects.
measures_overlap2_dist (dict): List of demand-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_supply_dist' Measure objects.
a_run_dist (object): Engine object incorporating all
'measures_all_dist' objects.
measure_master_msegs_out (dict): Master market microsegments
that should be generated for each Measure object in 'measures_all'
following competition and supply-demand overlap adjustments.
measure_master_msegs_out_dist (dict): Master market microsegments
that should be generated for each Measure object in
'measures_all_dist' following competition and supply-demand overlap
adjustments.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
cls.handyvars.aeo_years = ["2009", "2010"]
cls.handyvars.retro_rate = 0
cls.test_adopt_scheme = "Max adoption potential"
cls.adjust_key1 = str(
('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))
cls.adjust_key2 = str(
('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))
cls.test_htcl_adj = {
"supply": {
"['AIA_CZ1', 'single family home', 'existing']": {
"total": {
yr: 10 for yr in cls.handyvars.aeo_years},
"total affected": {
yr: 5 for yr in cls.handyvars.aeo_years},
"affected savings": {
yr: 5 for yr in cls.handyvars.aeo_years}},
},
"demand": {
"['AIA_CZ1', 'single family home', 'existing']": {
"total": {
yr: 10 for yr in cls.handyvars.aeo_years},
"total affected": {
yr: 5 for yr in cls.handyvars.aeo_years},
"affected savings": {
yr: 5 for yr in cls.handyvars.aeo_years}},
}}
cls.compete_meas1 = {
"name": "sample compete measure r1",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
cls.compete_meas1_dist = {
"name": "sample compete measure r1 dist",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array(
[15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array(
[20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key1: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key1: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array(
[15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array(
[20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key1: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key1: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}}}}
cls.compete_meas2 = {
"name": "sample compete measure r2",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
cls.compete_meas3 = {
"name": "sample compete measure r3",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["ASHP"],
"technology_type": {"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
# Array-valued ("distribution") variant of sample compete measure r3:
# identical market data to the point-value r3 measure except that the
# total/competed stock cost "efficient" entries are numpy arrays, which
# exercises measure competition under input uncertainty.  The
# "Technical potential" and "Max adoption potential" scenarios carry
# identical data, including a "supply-demand adjustment" bucket keyed
# on the shared contributing microsegment key (cls.adjust_key2).
cls.compete_meas3_dist = {
    "name": "sample compete measure r3 dist",
    "climate_zone": ["AIA_CZ1"],
    "bldg_type": ["single family home"],
    "end_use": {"primary": ["cooling"], "secondary": None},
    "technology": ["ASHP"],
    "technology_type": {"primary": "demand", "secondary": None},
    "market_entry_year": 2009,
    "market_exit_year": None,
    "yrs_on_mkt": ["2009", "2010"],
    "markets": {
        "Technical potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 10, "2010": 10},
                        "measure": {"2009": 10, "2010": 10}},
                    "competed": {
                        "all": {"2009": 5, "2010": 5},
                        "measure": {"2009": 5, "2010": 5}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 20, "2010": 20},
                        "efficient": {"2009": 15, "2010": 15}},
                    "competed": {
                        "baseline": {"2009": 10, "2010": 10},
                        "efficient": {"2009": 5, "2010": 5}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 20, "2010": 20}},
                    "competed": {
                        "baseline": {"2009": 15, "2010": 15},
                        "efficient": {"2009": 5, "2010": 5}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 10, "2010": 10},
                            # Array-valued capital cost inputs (the
                            # "distribution" part of this fixture)
                            "efficient": {
                                "2009": numpy.array([5, 6, 7]),
                                "2010": numpy.array([5, 6, 7])}},
                        "competed": {
                            "baseline": {"2009": 5, "2010": 5},
                            "efficient": {
                                "2009": numpy.array([0, 1, 2]),
                                "2010": numpy.array([0, 1, 2])}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": numpy.array([5, 6, 7]),
                                        "2010": numpy.array([5, 6, 7])}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": numpy.array([0, 1, 2]),
                                        "2010": numpy.array([0, 1, 2])}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}},
                # Demand-side measure savings/total energy used in the
                # supply-demand competition adjustment
                "supply-demand adjustment": {
                    "savings": {
                        cls.adjust_key2: {
                            "2009": 0, "2010": 0}},
                    "total": {
                        cls.adjust_key2: {
                            "2009": 100, "2010": 100}}}},
            "mseg_out_break": {}},
        # Identical data to the "Technical potential" scenario above
        "Max adoption potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 10, "2010": 10},
                        "measure": {"2009": 10, "2010": 10}},
                    "competed": {
                        "all": {"2009": 5, "2010": 5},
                        "measure": {"2009": 5, "2010": 5}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 20, "2010": 20},
                        "efficient": {"2009": 15, "2010": 15}},
                    "competed": {
                        "baseline": {"2009": 10, "2010": 10},
                        "efficient": {"2009": 5, "2010": 5}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 20, "2010": 20}},
                    "competed": {
                        "baseline": {"2009": 15, "2010": 15},
                        "efficient": {"2009": 5, "2010": 5}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {
                                "2009": numpy.array([5, 6, 7]),
                                "2010": numpy.array([5, 6, 7])}},
                        "competed": {
                            "baseline": {"2009": 5, "2010": 5},
                            "efficient": {
                                "2009": numpy.array([0, 1, 2]),
                                "2010": numpy.array([0, 1, 2])}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": numpy.array([5, 6, 7]),
                                        "2010": numpy.array([5, 6, 7])}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": numpy.array([0, 1, 2]),
                                        "2010": numpy.array([0, 1, 2])}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}},
                "supply-demand adjustment": {
                    "savings": {
                        cls.adjust_key2: {
                            "2009": 0, "2010": 0}},
                    "total": {
                        cls.adjust_key2: {
                            "2009": 100, "2010": 100}}}},
            "mseg_out_break": {}}}}
# Sample supply-side competition measure r4 (point values): twice the
# market size of r3 (stock of 20 vs. 10), with the shared cooling
# contributing microsegment (cls.adjust_key2) plus an AIA_CZ2 lighting
# microsegment.  "Technical potential" and "Max adoption potential"
# carry identical data.
# NOTE(review): the lighting microsegment dict is keyed directly under
# "mseg_adjust" rather than inside "contributing mseg keys and values"
# — confirm this placement is intended.
cls.compete_meas4 = {
    "name": "sample compete measure r4",
    "climate_zone": ["AIA_CZ1"],
    "bldg_type": ["single family home"],
    "end_use": {"primary": ["cooling"], "secondary": None},
    "technology": ["ASHP"],
    "technology_type": {"primary": "supply", "secondary": None},
    "market_entry_year": 2009,
    "market_exit_year": None,
    "yrs_on_mkt": ["2009", "2010"],
    "markets": {
        "Technical potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 20, "2010": 20},
                        "measure": {"2009": 20, "2010": 20}},
                    "competed": {
                        "all": {"2009": 10, "2010": 10},
                        "measure": {"2009": 10, "2010": 10}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 40, "2010": 40},
                        "efficient": {"2009": 30, "2010": 30}},
                    "competed": {
                        "baseline": {"2009": 20, "2010": 20},
                        "efficient": {"2009": 10, "2010": 10}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 60, "2010": 60},
                        "efficient": {"2009": 40, "2010": 40}},
                    "competed": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 10, "2010": 10}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 10, "2010": 10}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 0, "2010": 0}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 40, "2010": 40},
                            "efficient": {"2009": 30, "2010": 30}},
                        "competed": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 10, "2010": 10}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 60, "2010": 60},
                            "efficient": {"2009": 40, "2010": 40}},
                        "competed": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 10, "2010": 10}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": 5, "2010": 5}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": 0, "2010": 0}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                str(('primary', 'AIA_CZ2', 'single family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}}},
            "mseg_out_break": {}},
        # Identical data to the "Technical potential" scenario above
        "Max adoption potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 20, "2010": 20},
                        "measure": {"2009": 20, "2010": 20}},
                    "competed": {
                        "all": {"2009": 10, "2010": 10},
                        "measure": {"2009": 10, "2010": 10}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 40, "2010": 40},
                        "efficient": {"2009": 30, "2010": 30}},
                    "competed": {
                        "baseline": {"2009": 20, "2010": 20},
                        "efficient": {"2009": 10, "2010": 10}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 60, "2010": 60},
                        "efficient": {"2009": 40, "2010": 40}},
                    "competed": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 10, "2010": 10}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 10, "2010": 10}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 0, "2010": 0}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 40, "2010": 40},
                            "efficient": {"2009": 30, "2010": 30}},
                        "competed": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 10, "2010": 10}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 60, "2010": 60},
                            "efficient": {"2009": 40, "2010": 40}},
                        "competed": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 10, "2010": 10}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": 5, "2010": 5}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": 0, "2010": 0}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                str(('primary', 'AIA_CZ2', 'single family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}}},
            "mseg_out_break": {}}}}
# Sample supply-side competition measure r5 (point values): three times
# the market size of r3 (stock of 30 vs. 10), with the shared cooling
# contributing microsegment (cls.adjust_key2) plus two AIA_CZ2 lighting
# microsegments (single family and multi family homes).  "Technical
# potential" and "Max adoption potential" carry identical data.
# NOTE(review): the lighting microsegment dicts are keyed directly
# under "mseg_adjust" rather than inside "contributing mseg keys and
# values" — confirm this placement is intended.
cls.compete_meas5 = {
    "name": "sample compete measure r5",
    "climate_zone": ["AIA_CZ1"],
    "bldg_type": ["single family home"],
    "end_use": {"primary": ["cooling"], "secondary": None},
    "technology": ["ASHP"],
    "technology_type": {"primary": "supply", "secondary": None},
    "market_entry_year": 2009,
    "market_exit_year": None,
    "yrs_on_mkt": ["2009", "2010"],
    "markets": {
        "Technical potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 30, "2010": 30},
                        "measure": {"2009": 30, "2010": 30}},
                    "competed": {
                        "all": {"2009": 15, "2010": 15},
                        "measure": {"2009": 15, "2010": 15}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 60, "2010": 60},
                        "efficient": {"2009": 45, "2010": 45}},
                    "competed": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 15, "2010": 15}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 90, "2010": 90},
                        "efficient": {"2009": 60, "2010": 60}},
                    "competed": {
                        "baseline": {"2009": 45, "2010": 45},
                        "efficient": {"2009": 15, "2010": 15}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 0, "2010": 0}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 60, "2010": 60},
                            "efficient": {"2009": 45, "2010": 45}},
                        "competed": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 15, "2010": 15}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 90, "2010": 90},
                            "efficient": {"2009": 60, "2010": 60}},
                        "competed": {
                            "baseline": {"2009": 45, "2010": 45},
                            "efficient": {"2009": 15, "2010": 15}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": 5, "2010": 5}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": 0, "2010": 0}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                str(('primary', 'AIA_CZ2', 'single family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                str(('primary', 'AIA_CZ2', 'multi family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}}},
            "mseg_out_break": {}},
        # Identical data to the "Technical potential" scenario above
        "Max adoption potential": {
            "master_mseg": {
                "stock": {
                    "total": {
                        "all": {"2009": 30, "2010": 30},
                        "measure": {"2009": 30, "2010": 30}},
                    "competed": {
                        "all": {"2009": 15, "2010": 15},
                        "measure": {"2009": 15, "2010": 15}}},
                "energy": {
                    "total": {
                        "baseline": {"2009": 60, "2010": 60},
                        "efficient": {"2009": 45, "2010": 45}},
                    "competed": {
                        "baseline": {"2009": 30, "2010": 30},
                        "efficient": {"2009": 15, "2010": 15}}},
                "carbon": {
                    "total": {
                        "baseline": {"2009": 90, "2010": 90},
                        "efficient": {"2009": 60, "2010": 60}},
                    "competed": {
                        "baseline": {"2009": 45, "2010": 45},
                        "efficient": {"2009": 15, "2010": 15}}},
                "cost": {
                    "stock": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 0, "2010": 0}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 60, "2010": 60},
                            "efficient": {"2009": 45, "2010": 45}},
                        "competed": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 15, "2010": 15}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 90, "2010": 90},
                            "efficient": {"2009": 60, "2010": 60}},
                        "competed": {
                            "baseline": {"2009": 45, "2010": 45},
                            "efficient": {"2009": 15, "2010": 15}}}},
                "lifetime": {"baseline": {"2009": 1, "2010": 1},
                             "measure": 1}},
            "mseg_adjust": {
                "contributing mseg keys and values": {
                    cls.adjust_key2: {
                        "stock": {
                            "total": {
                                "all": {"2009": 10, "2010": 10},
                                "measure": {"2009": 10, "2010": 10}},
                            "competed": {
                                "all": {"2009": 5, "2010": 5},
                                "measure": {"2009": 5, "2010": 5}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "cost": {
                            "stock": {
                                "total": {
                                    "baseline": {
                                        "2009": 10, "2010": 10},
                                    "efficient": {
                                        "2009": 5, "2010": 5}},
                                "competed": {
                                    "baseline": {
                                        "2009": 5, "2010": 5},
                                    "efficient": {
                                        "2009": 0, "2010": 0}}},
                            "energy": {
                                "total": {
                                    "baseline": {"2009": 20, "2010": 20},
                                    "efficient": {"2009": 15, "2010": 15}},
                                "competed": {
                                    "baseline": {"2009": 10, "2010": 10},
                                    "efficient": {"2009": 5, "2010": 5}}},
                            "carbon": {
                                "total": {
                                    "baseline": {"2009": 30, "2010": 30},
                                    "efficient": {"2009": 20, "2010": 20}},
                                "competed": {
                                    "baseline": {"2009": 15, "2010": 15},
                                    "efficient": {"2009": 5, "2010": 5}}}},
                        "lifetime": {
                            "baseline": {"2009": 1, "2010": 1},
                            "measure": 1},
                        "sub-market scaling": 1}},
                str(('primary', 'AIA_CZ2', 'single family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                str(('primary', 'AIA_CZ2', 'multi family home',
                     'electricity (grid)', 'lighting',
                     'reflector (LED)')): {
                    "stock": {
                        "total": {
                            "all": {"2009": 10, "2010": 10},
                            "measure": {"2009": 10, "2010": 10}},
                        "competed": {
                            "all": {"2009": 5, "2010": 5},
                            "measure": {"2009": 5, "2010": 5}}},
                    "energy": {
                        "total": {
                            "baseline": {"2009": 20, "2010": 20},
                            "efficient": {"2009": 15, "2010": 15}},
                        "competed": {
                            "baseline": {"2009": 10, "2010": 10},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "carbon": {
                        "total": {
                            "baseline": {"2009": 30, "2010": 30},
                            "efficient": {"2009": 20, "2010": 20}},
                        "competed": {
                            "baseline": {"2009": 15, "2010": 15},
                            "efficient": {"2009": 5, "2010": 5}}},
                    "cost": {
                        "stock": {
                            "total": {
                                "baseline": {
                                    "2009": 10, "2010": 10},
                                "efficient": {
                                    "2009": 5, "2010": 5}},
                            "competed": {
                                "baseline": {
                                    "2009": 5, "2010": 5},
                                "efficient": {
                                    "2009": 0, "2010": 0}}},
                        "energy": {
                            "total": {
                                "baseline": {"2009": 20, "2010": 20},
                                "efficient": {"2009": 15, "2010": 15}},
                            "competed": {
                                "baseline": {"2009": 10, "2010": 10},
                                "efficient": {"2009": 5, "2010": 5}}},
                        "carbon": {
                            "total": {
                                "baseline": {"2009": 30, "2010": 30},
                                "efficient": {"2009": 20, "2010": 20}},
                            "competed": {
                                "baseline": {"2009": 15, "2010": 15},
                                "efficient": {"2009": 5, "2010": 5}}}},
                    "lifetime": {
                        "baseline": {"2009": 1, "2010": 1},
                        "measure": 1},
                    "sub-market scaling": 1},
                "competed choice parameters": {
                    cls.adjust_key2: {
                        "b1": {"2009": -0.95, "2010": -0.95},
                        "b2": {"2009": -0.10, "2010": -0.10}}},
                "secondary mseg adjustments": {
                    "market share": {
                        "original energy (total captured)": {},
                        "original energy (competed and captured)": {},
                        "adjusted energy (total captured)": {},
                        "adjusted energy (competed and captured)": {}}}},
            "mseg_out_break": {}}}}
# Instantiate the point-value competition test measures.  Input dicts
# that are reused by other fixtures are deep-copied so that Measure
# initialization cannot mutate the shared originals.
point_value_inputs = [
    cls.compete_meas1, copy.deepcopy(cls.compete_meas2),
    cls.compete_meas3, copy.deepcopy(cls.compete_meas4),
    copy.deepcopy(cls.compete_meas5)]
cls.measures_all = [
    run.Measure(cls.handyvars, **meas_in) for meas_in in
    point_value_inputs]
# First two measures form one competition group, the last three another
cls.measures_demand = cls.measures_all[0:2]
cls.measures_supply = cls.measures_all[2:5]
# Contributing microsegment keys on which the grouped measures overlap
supply_overlap_key = str((
    'primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
    'cooling', 'supply', 'ASHP', 'existing'))
demand_overlap_key = str((
    'primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
    'cooling', 'demand', 'windows', 'existing'))
cls.measures_overlap1 = {
    "measures": cls.measures_all[2:5],
    "keys": [[supply_overlap_key] for _ in range(3)]}
cls.measures_overlap2 = {
    "measures": cls.measures_all[0:2],
    "keys": [[demand_overlap_key] for _ in range(2)]}
cls.a_run = run.Engine(cls.handyvars, cls.measures_all)
# Set information needed to finalize point value test measure
# consumer metrics: one ANPV record per measure in cls.a_run.measures,
# in the same order.  "commercial" entries are None throughout.
consumer_metrics_final = [{
    "stock cost": {
        "residential": {
            "2009": 95,
            "2010": 95},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -150,
            "2010": -150},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -150,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 120,
            "2010": 120},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -400,
            "2010": -400},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -50,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 95,
            "2010": 95},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -150,
            "2010": -150},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -150,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 120,
            "2010": 120},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -400,
            "2010": -400},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -50,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 100,
            "2010": 100},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -200,
            "2010": -200},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -100,
            "2010": -100},
        "commercial": {
            "2009": None,
            "2010": None}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run.measures):
    m.consumer_metrics['anpv'] = consumer_metrics_final[ind]
# Instantiate the array-valued ("dist") competition test measures;
# reused input dicts are deep-copied as in the point-value setup.
dist_inputs = [
    cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2),
    cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4),
    copy.deepcopy(cls.compete_meas5)]
cls.measures_all_dist = [
    run.Measure(cls.handyvars, **meas_in) for meas_in in dist_inputs]
cls.measures_demand_dist = cls.measures_all_dist[0:2]
cls.measures_supply_dist = cls.measures_all_dist[2:5]
cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2]
cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5]
# Contributing microsegment keys on which the grouped measures overlap
supply_overlap_key_dist = str((
    'primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
    'cooling', 'supply', 'ASHP', 'existing'))
demand_overlap_key_dist = str((
    'primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
    'cooling', 'demand', 'windows', 'existing'))
cls.measures_overlap1_dist = {
    "measures": cls.measures_all_dist[2:5],
    "keys": [[supply_overlap_key_dist] for _ in range(3)]}
cls.measures_overlap2_dist = {
    "measures": cls.measures_all_dist[0:2],
    "keys": [[demand_overlap_key_dist] for _ in range(2)]}
cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)
# Set information needed to finalize array test measure consumer
# metrics: one ANPV record per measure in cls.a_run_dist.measures, in
# the same order; records 1 and 3 carry numpy arrays to match the
# array-valued measures.  "commercial" entries are None throughout.
consumer_metrics_final_dist = [{
    "stock cost": {
        "residential": {
            "2009": 95,
            "2010": 95},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": numpy.array([-150, -200, -100]),
            "2010": numpy.array([-150, -200, -100])},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": numpy.array([-150, -200, -100]),
            "2010": numpy.array([-50, -100, -10])},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 120,
            "2010": 120},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -400,
            "2010": -400},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -50,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": numpy.array([95, 100, 90]),
            "2010": numpy.array([95, 100, 90])},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -150,
            "2010": -150},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -150,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 120,
            "2010": 120},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -400,
            "2010": -400},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -50,
            "2010": -50},
        "commercial": {
            "2009": None,
            "2010": None}}},
    {
    "stock cost": {
        "residential": {
            "2009": 100,
            "2010": 100},
        "commercial": {
            "2009": None,
            "2010": None}},
    "energy cost": {
        "residential": {
            "2009": -200,
            "2010": -200},
        "commercial": {
            "2009": None,
            "2010": None}},
    "carbon cost": {
        "residential": {
            "2009": -100,
            "2010": -100},
        "commercial": {
            "2009": None,
            "2010": None}}}]
# Adjust/finalize array test measure consumer metrics
for ind, m in enumerate(cls.a_run_dist.measures):
    m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind]
cls.measures_master_msegs_out = [{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 2.23, "2010": 2.23}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 1.11, "2010": 1.11}}},
"energy": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.670251, "2010": 1.670251}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"carbon": {
"total": {
"baseline": {"2009": 3.340502, "2010": 3.340502},
"efficient": {"2009": 2.227001, "2010": 2.227001}},
"competed": {
"baseline": {"2009": 1.670251, "2010": 1.670251},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.113501, "2010": 1.113501}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.670251, "2010": 1.670251}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"carbon": {
"total": {
"baseline": {"2009": 3.340502, "2010": 3.340502},
"efficient": {"2009": 2.227001, "2010": 2.227001}},
"competed": {
"baseline": {"2009": 1.670251, "2010": 1.670251},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 17.77, "2010": 17.77}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.89, "2010": 8.89}}},
"energy": {
"total": {
"baseline": {"2009": 27.77300, "2010": 27.77300},
"efficient": {"2009": 20.82975, "2010": 20.82975}},
"competed": {
"baseline": {"2009": 13.88650, "2010": 13.88650},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"carbon": {
"total": {
"baseline": {"2009": 41.65950, "2010": 41.65950},
"efficient": {"2009": 27.77300, "2010": 27.77300}},
"competed": {
"baseline": {"2009": 20.82975, "2010": 20.82975},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 17.77300, "2010": 17.77300},
"efficient": {"2009": 8.886499, "2010": 8.886499}},
"competed": {
"baseline": {"2009": 8.886499, "2010": 8.886499},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 27.77300, "2010": 27.77300},
"efficient": {"2009": 20.82975, "2010": 20.82975}},
"competed": {
"baseline": {"2009": 13.88650, "2010": 13.88650},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"carbon": {
"total": {
"baseline": {"2009": 41.65950, "2010": 41.65950},
"efficient": {"2009": 27.77300, "2010": 27.77300}},
"competed": {
"baseline": {"2009": 20.82975, "2010": 20.82975},
"efficient": {"2009": 6.943250, "2010": 6.943250}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 1.73, "2010": 1.73}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0.87, "2010": 0.87}}},
"energy": {
"total": {
"baseline": {"2009": 1.73179114, "2010": 1.73179114},
"efficient": {"2009": 1.29884336, "2010": 1.29884336}},
"competed": {
"baseline": {"2009": 0.865895571, "2010": 0.865895571},
"efficient": {"2009": 0.432947785, "2010": 0.432947785}}},
"carbon": {
"total": {
"baseline": {"2009": 2.59768671, "2010": 2.59768671},
"efficient": {"2009": 1.73179114, "2010": 1.73179114}},
"competed": {
"baseline": {"2009": 1.29884336, "2010": 1.29884336},
"efficient": {"2009": 0.432947785, "2010": 0.432947785}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 1.73179114, "2010": 1.73179114},
"efficient": {
"2009": 0.865895571, "2010": 0.865895571}},
"competed": {
"baseline": {"2009": 0.865895571, "2010": 0.865895571},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 1.73179114, "2010": 1.73179114},
"efficient": {
"2009": 1.29884336, "2010": 1.29884336}},
"competed": {
"baseline": {
"2009": 0.865895571, "2010": 0.865895571},
"efficient": {
"2009": 0.432947785, "2010": 0.432947785}}},
"carbon": {
"total": {
"baseline": {
"2009": 2.59768671, "2010": 2.59768671},
"efficient": {
"2009": 1.73179114, "2010": 1.73179114}},
"competed": {
"baseline": {
"2009": 1.29884336, "2010": 1.29884336},
"efficient": {
"2009": 0.432947785, "2010": 0.432947785}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 16.04, "2010": 16.04}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.02, "2010": 8.02}}},
"energy": {
"total": {
"baseline": {"2009": 26.04455, "2010": 26.04455},
"efficient": {"2009": 19.53341, "2010": 19.53341}},
"competed": {
"baseline": {"2009": 13.02227, "2010": 13.02227},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"carbon": {
"total": {
"baseline": {"2009": 39.06682, "2010": 39.06682},
"efficient": {"2009": 26.04455, "2010": 26.04455}},
"competed": {
"baseline": {"2009": 19.53341, "2010": 19.53341},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 16.04455, "2010": 16.04455},
"efficient": {"2009": 8.022273, "2010": 8.022273}},
"competed": {
"baseline": {"2009": 8.022273, "2010": 8.022273},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 26.04455, "2010": 26.04455},
"efficient": {"2009": 19.53341, "2010": 19.53341}},
"competed": {
"baseline": {"2009": 13.02227, "2010": 13.02227},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"carbon": {
"total": {
"baseline": {"2009": 39.06682, "2010": 39.06682},
"efficient": {"2009": 26.04455, "2010": 26.04455}},
"competed": {
"baseline": {"2009": 19.53341, "2010": 19.53341},
"efficient": {"2009": 6.511136, "2010": 6.511136}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 22.22, "2010": 22.22}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 11.11, "2010": 11.11}}},
"energy": {
"total": {
"baseline": {"2009": 42.22366, "2010": 42.22366},
"efficient": {"2009": 31.66775, "2010": 31.66775}},
"competed": {
"baseline": {"2009": 21.11183, "2010": 21.11183},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"carbon": {
"total": {
"baseline": {"2009": 63.33550, "2010": 63.33550},
"efficient": {"2009": 42.22366, "2010": 42.22366}},
"competed": {
"baseline": {"2009": 31.66775, "2010": 31.66775},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 22.22366, "2010": 22.22366},
"efficient": {"2009": 11.11183, "2010": 11.11183}},
"competed": {
"baseline": {"2009": 11.11183, "2010": 11.11183},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 42.22366, "2010": 42.22366},
"efficient": {"2009": 31.66775, "2010": 31.66775}},
"competed": {
"baseline": {"2009": 21.11183, "2010": 21.11183},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"carbon": {
"total": {
"baseline": {"2009": 63.33550, "2010": 63.33550},
"efficient": {"2009": 42.22366, "2010": 42.22366}},
"competed": {
"baseline": {"2009": 31.66775, "2010": 31.66775},
"efficient": {"2009": 10.55592, "2010": 10.55592}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
cls.measures_master_msegs_out_dist = [{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([2.23, 9.77, 0.02]),
"2010": numpy.array([2.23, 9.77, 0.02])}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {
"2009": numpy.array([1.11, 4.89, 0.01]),
"2010": numpy.array([1.11, 4.89, 0.01])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.670251, 7.816181, 0.01637724]),
"2010": numpy.array([
1.670251, 7.816181, 0.01637724])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
3.340502, 14.65534, 0.02890102]),
"2010": numpy.array([
3.340502, 14.65534, 0.02890102])},
"efficient": {
"2009": numpy.array([
2.227001, 10.25874, 0.02119408]),
"2010": numpy.array([
2.227001, 10.25874, 0.02119408])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.670251, 7.32767, 0.01445051]),
"2010": numpy.array([
1.670251, 7.32767, 0.01445051])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.670251, 7.816181, 0.01637724]),
"2010": numpy.array([
1.670251, 7.816181, 0.01637724])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
3.340502, 14.65534, 0.02890102]),
"2010": numpy.array([
3.340502, 14.65534, 0.02890102])},
"efficient": {
"2009": numpy.array([
2.227001, 10.25874, 0.02119408]),
"2010": numpy.array([
2.227001, 10.25874, 0.02119408])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.670251, 7.32767, 0.01445051]),
"2010": numpy.array([
1.670251, 7.32767, 0.01445051])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": numpy.array([17.77, 10.23, 19.98]),
"2010": numpy.array([17.77, 10.23, 19.98])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([8.89, 5.11, 9.99]),
"2010": numpy.array([8.89, 5.11, 9.99])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])},
"efficient": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.88650, 10.11489, 14.99037]),
"2010": numpy.array([
13.88650, 10.11489, 14.99037])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
41.65950, 30.34466, 44.97110]),
"2010": numpy.array([
41.65950, 30.34466, 44.97110])},
"efficient": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])}},
"competed": {
"baseline": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
17.77300, 10.22977, 19.98073]),
"2010": numpy.array([
17.77300, 10.22977, 19.98073])},
"efficient": {
"2009": numpy.array([
8.886499, 5.114887, 9.990366]),
"2010": numpy.array([
8.886499, 5.114887, 9.990366])}},
"competed": {
"baseline": {
"2009": numpy.array([
8.886499, 5.114887, 9.990366]),
"2010": numpy.array([
8.886499, 5.114887, 9.990366])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])},
"efficient": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.88650, 10.11489, 14.99037]),
"2010": numpy.array([
13.88650, 10.11489, 14.99037])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
41.65950, 30.34466, 44.97110]),
"2010": numpy.array([
41.65950, 30.34466, 44.97110])},
"efficient": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])}},
"competed": {
"baseline": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([1.73, 0.02, 9.60]),
"2010": numpy.array([1.73, 0.02, 9.60])}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {
"2009": numpy.array([0.87, 0.01, 4.80]),
"2010": numpy.array([0.87, 0.01, 4.80])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
2.59768671, 0.02713253, 14.40498233]),
"2010": numpy.array([
2.59768671, 0.02713253, 14.40498233])},
"efficient": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
0.865895571, 0.01085301, 6.722325]),
"2010": numpy.array([
0.865895571, 0.01085301, 6.722325])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0, 0.001808835, 1.920664]),
"2010": numpy.array([
0, 0.001808835, 1.920664])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
2.59768671, 0.02713253, 14.40498233]),
"2010": numpy.array([
2.59768671, 0.02713253, 14.40498233])},
"efficient": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": numpy.array([16.04, 17.30, 10.29]),
"2010": numpy.array([16.04, 17.30, 10.29])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([8.02, 8.65, 5.14]),
"2010": numpy.array([8.02, 8.65, 5.14])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])},
"efficient": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.02227, 13.64868, 10.14500]),
"2010": numpy.array([
13.02227, 13.64868, 10.14500])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
39.06682, 40.94604, 30.43499]),
"2010": numpy.array([
39.06682, 40.94604, 30.43499])},
"efficient": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])}},
"competed": {
"baseline": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
16.04455, 17.29736, 10.29000]),
"2010": numpy.array([
16.04455, 17.29736, 10.29000])},
"efficient": {
"2009": numpy.array([
8.022273, 8.648681, 5.144998]),
"2010": numpy.array([
8.022273, 8.648681, 5.144998])}},
"competed": {
"baseline": {
"2009": numpy.array([
8.022273, 8.648681, 5.144998]),
"2010": numpy.array([
8.022273, 8.648681, 5.144998])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])},
"efficient": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.02227, 13.64868, 10.14500]),
"2010": numpy.array([
13.02227, 13.64868, 10.14500])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
39.06682, 40.94604, 30.43499]),
"2010": numpy.array([
39.06682, 40.94604, 30.43499])},
"efficient": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])}},
"competed": {
"baseline": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {
"2009": numpy.array([22.22, 22.68, 20.11]),
"2010": numpy.array([22.22, 22.68, 20.11])}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {
"2009": numpy.array([11.11, 11.34, 10.05]),
"2010": numpy.array([11.11, 11.34, 10.05])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])},
"efficient": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])}},
"competed": {
"baseline": {
"2009": numpy.array([
21.11183, 21.34227, 20.05334]),
"2010": numpy.array([
21.11183, 21.34227, 20.05334])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
63.33550, 64.02682, 60.16002]),
"2010": numpy.array([
63.33550, 64.02682, 60.16002])},
"efficient": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])}},
"competed": {
"baseline": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
22.22366, 22.68455, 20.10668]),
"2010": numpy.array([
22.22366, 22.68455, 20.10668])},
"efficient": {
"2009": numpy.array([
11.11183, 11.34227, 10.05334]),
"2010": numpy.array([
11.11183, 11.34227, 10.05334])}},
"competed": {
"baseline": {
"2009": numpy.array([
11.11183, 11.34227, 10.05334]),
"2010": numpy.array([
11.11183, 11.34227, 10.05334])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])},
"efficient": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])}},
"competed": {
"baseline": {
"2009": numpy.array([
21.11183, 21.34227, 20.05334]),
"2010": numpy.array([
21.11183, 21.34227, 20.05334])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
63.33550, 64.02682, 60.16002]),
"2010": numpy.array([
63.33550, 64.02682, 60.16002])},
"efficient": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])}},
"competed": {
"baseline": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
def test_compete_res(self):
"""Test outcomes given valid sample measures w/ point value inputs."""
# Run the measure competition routine on sample demand-side measures
self.a_run.compete_res_primary(
self.measures_demand, self.adjust_key1, self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run.htcl_adj(
self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj)
# Run the measure competition routine on sample supply-side measures
self.a_run.compete_res_primary(
self.measures_supply, self.adjust_key2, self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run.htcl_adj(
self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj)
# Check updated competed master microsegments for each sample measure
# following competition/supply-demand overlap adjustments
for ind, d in enumerate(self.a_run.measures):
self.dict_check(
self.measures_master_msegs_out[ind],
self.a_run.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
def test_compete_res_dist(self):
"""Test outcomes given valid sample measures w/ some array inputs."""
# Run the measure competition routine on sample demand-side measures
self.a_run_dist.compete_res_primary(
self.measures_demand_dist, self.adjust_key1,
self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run_dist.htcl_adj(
self.measures_demand_dist, self.test_adopt_scheme,
self.test_htcl_adj)
# Run the measure competition routine on sample supply-side measures
self.a_run_dist.compete_res_primary(
self.measures_supply_dist, self.adjust_key2,
self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run_dist.htcl_adj(
self.measures_supply_dist, self.test_adopt_scheme,
self.test_htcl_adj)
# Check updated competed master microsegments for each sample measure
# following competition/supply-demand overlap adjustments
for ind, d in enumerate(self.a_run_dist.measures):
self.dict_check(
self.measures_master_msegs_out_dist[ind],
self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
class ComCompeteTest(unittest.TestCase, CommonMethods):
"""Test 'compete_com_primary' and 'secondary_adj' functions.
Verify that 'compete_com_primary' correctly calculates primary market
shares and updates master microsegments for a series of competing
commercial measures; and that 'secondary_adj' correctly adjusts any
secondary markets associated with these primary market microsegments.
Attributes:
handyvars (object): Useful variables across the class.
test_adopt_scheme (string): Sample consumer adoption scheme.
overlap_key (string): First sample string for competed primary market
microsegment key chain being tested.
overlap_key_scnd (string): Second sample string for secondary market
microsegment key chain being tested.
secnd_adj_key (string): Key used to link primary and secondary market
microsegments (by climate, building type, structure type).
compete_meas1 (dict): Sample commercial supply-side lighting measure 1.
compete_meas2 (dict): Sample commercial supply-side lighting measure 2.
compete_meas3 (dict): Sample commercial supply-side lighting measure 3.
compete_meas_dist (dict): Alternative version of sample commercial
supply-side lighting measure 1 including lists stock cost input
values instead of point values.
measures_all (list): List of all competing measures with point
value inputs.
measures_secondary (list): Subset of 'measures_all' with secondary
microsegments to adjust.
a_run (object): Analysis engine object incorporating all
'measures_primary' objects.
measures_all_dist (list): List of competing measures including
some measures with array inputs.
measures_secondary_dist (list): Subset of 'measures_all_dist' with
secondary microsegments to adjust.
a_run_dist (object): Analysis engine object incorporating all
'measures_primary_dist' objects.
measures_overlap (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand' Measure objects.
measure_master_msegs_out (dict): Master market microsegments
that should be generated for each Measure object in 'measures_all'
following competition and supply-demand overlap adjustments.
measure_master_msegs_out_dist (dict): Master market microsegments
that should be generated for each Measure object in
'measures_all_dist' following competition and supply-demand overlap
adjustments.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
cls.handyvars.retro_rate = 0
cls.handyvars.aeo_years = ["2009", "2010"]
cls.test_adopt_scheme = "Max adoption potential"
cls.overlap_key = str(
('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)',
'lighting', 'reflector (LED)', 'existing'))
cls.overlap_key_scnd = str(
('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)',
'cooling', 'demand', 'lighting gain', 'existing'))
cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing'))
cls.compete_meas1 = {
"name": "sample compete measure c1",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": None},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.compete_meas2 = {
"name": "sample compete measure c2",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": ["heating", "secondary heating", "cooling"]},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": "demand"},
"market_entry_year": 2010,
"market_exit_year": None,
"yrs_on_mkt": ["2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 10}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 10}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.compete_meas2_dist = {
"name": "sample compete measure c2 dist",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": ["heating", "secondary heating", "cooling"]},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": "demand"},
"market_entry_year": 2010,
"market_exit_year": None,
"yrs_on_mkt": ["2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": 20,
"2010": numpy.array([10, 12, 14])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 2, 4])}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0,
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5,
"2010": numpy.array([
0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}},
"supply-demand adjustment": {
"savings": {},
"total": {}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": 20,
"2010": numpy.array([10, 12, 14])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 2, 4])}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0,
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5,
"2010": numpy.array([
0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}},
"supply-demand adjustment": {
"savings": {},
"total": {}}},
"mseg_out_break": {}}}}
cls.compete_meas3 = {
"name": "sample compete measure c3",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": None},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.measures_all = [run.Measure(
cls.handyvars, **x) for x in [
copy.deepcopy(cls.compete_meas1), cls.compete_meas2,
copy.deepcopy(cls.compete_meas3)]]
cls.measures_secondary = [cls.measures_all[1]]
# Instantiate engine object based on above measures
cls.a_run = run.Engine(cls.handyvars, cls.measures_all)
# Set information needed to finalize array test measure consumer
# metrics
consumer_metrics = [{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160},
"2010": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400},
"2010": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75},
"2010": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115},
"2010": {
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370},
"2010": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170},
"2010": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110},
"2010": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200},
"2010": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120},
"2010": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120}}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run.measures):
m.consumer_metrics['anpv'] = consumer_metrics[ind]
cls.measures_all_dist = [run.Measure(
cls.handyvars, **x) for x in [
copy.deepcopy(cls.compete_meas1),
cls.compete_meas2_dist,
copy.deepcopy(cls.compete_meas3)]]
cls.measures_secondary_dist = [cls.measures_all_dist[1]]
cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)
# Set information needed to finalize array test measure consumer
# metrics
consumer_metrics_dist = [{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160},
"2010": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400},
"2010": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75},
"2010": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75}}}},
{
"stock cost": {
"residential": {
"2009": None,
"2010": None
},
"commercial": {
"2009": None,
"2010": numpy.array([
{
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115},
{
"rate 1": 205, "rate 2": 100, "rate 3": 105,
"rate 4": 110, "rate 5": 115,
"rate 6": 120, "rate 7": 125},
{
"rate 1": 105, "rate 2": 110, "rate 3": 115,
"rate 4": 120, "rate 5": 125,
"rate 6": 10, "rate 7": 135}])}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370},
"2010": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170},
"2010": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110},
"2010": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200},
"2010": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120},
"2010": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120}}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run_dist.measures):
m.consumer_metrics['anpv'] = consumer_metrics_dist[ind]
cls.measures_master_msegs_out = [{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 17, "2010": 12}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.5, "2010": 6}}},
"energy": {
"total": {
"baseline": {"2009": 34, "2010": 24},
"efficient": {"2009": 25.5, "2010": 18}},
"competed": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 51, "2010": 36},
"efficient": {"2009": 34, "2010": 24}},
"competed": {
"baseline": {"2009": 25.5, "2010": 18},
"efficient": {"2009": 8.5, "2010": 6}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}},
"competed": {
"baseline": {"2009": 8.5, "2010": 6},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 34, "2010": 24},
"efficient": {"2009": 25.5, "2010": 18}},
"competed": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 51, "2010": 36},
"efficient": {"2009": 34, "2010": 24}},
"competed": {
"baseline": {"2009": 25.5, "2010": 18},
"efficient": {"2009": 8.5, "2010": 6}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 16}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 8}}},
"energy": {
"total": {
"baseline": {"2009": 0, "2010": 24},
"efficient": {"2009": 0, "2010": 18}},
"competed": {
"baseline": {"2009": 0, "2010": 12},
"efficient": {"2009": 0, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 0, "2010": 36},
"efficient": {"2009": 0, "2010": 24}},
"competed": {
"baseline": {"2009": 0, "2010": 18},
"efficient": {"2009": 0, "2010": 6}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 16},
"efficient": {"2009": 20, "2010": 8}},
"competed": {
"baseline": {"2009": 5, "2010": 8},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 0, "2010": 24},
"efficient": {"2009": 0, "2010": 18}},
"competed": {
"baseline": {"2009": 0, "2010": 12},
"efficient": {"2009": 0, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 0, "2010": 36},
"efficient": {"2009": 0, "2010": 24}},
"competed": {
"baseline": {"2009": 0, "2010": 18},
"efficient": {"2009": 0, "2010": 6}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 23, "2010": 22}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 11.5, "2010": 11}}},
"energy": {
"total": {
"baseline": {"2009": 46, "2010": 44},
"efficient": {"2009": 34.5, "2010": 33}},
"competed": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}}},
"carbon": {
"total": {
"baseline": {"2009": 69, "2010": 66},
"efficient": {"2009": 46, "2010": 44}},
"competed": {
"baseline": {"2009": 34.5, "2010": 33},
"efficient": {"2009": 11.5, "2010": 11}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}},
"competed": {
"baseline": {"2009": 11.5, "2010": 11},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 46, "2010": 44},
"efficient": {"2009": 34.5, "2010": 33}},
"competed": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}}},
"carbon": {
"total": {
"baseline": {"2009": 69, "2010": 66},
"efficient": {"2009": 46, "2010": 44}},
"competed": {
"baseline": {"2009": 34.5, "2010": 33},
"efficient": {"2009": 11.5, "2010": 11}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
cls.measures_master_msegs_out_dist = [{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": 17,
"2010": numpy.array([12, 13, 16])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"energy": {
"total": {
"baseline": {
"2009": 34,
"2010": numpy.array([24, 26, 32])},
"efficient": {
"2009": 25.5,
"2010": numpy.array([18, 19.5, 24])}},
"competed": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"carbon": {
"total": {
"baseline": {
"2009": 51,
"2010": numpy.array([36, 39, 48])},
"efficient": {
"2009": 34,
"2010": numpy.array([24, 26, 32])}},
"competed": {
"baseline": {
"2009": 25.5,
"2010": numpy.array([18.0, 19.5, 24.0])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6, 6.5, 8])}},
"competed": {
"baseline": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])},
"efficient": {
"2009": 0,
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": 34,
"2010": numpy.array([24, 26, 32])},
"efficient": {
"2009": 25.5,
"2010": numpy.array([18, 19.5, 24])}},
"competed": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"carbon": {
"total": {
"baseline": {
"2009": 51,
"2010": numpy.array([36, 39, 48])},
"efficient": {
"2009": 34,
"2010": numpy.array([24, 26, 32])}},
"competed": {
"baseline": {
"2009": 25.5,
"2010": numpy.array([18.0, 19.5, 24.0])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": 0,
"2010": numpy.array([16, 15, 13])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": 0,
"2010": numpy.array([8.0, 7.5, 6.5])}}},
"energy": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([24, 20, 12])},
"efficient": {
"2009": 0,
"2010": numpy.array([18, 15, 9])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([12, 10, 6])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"carbon": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([36, 30, 18])},
"efficient": {
"2009": 0,
"2010": numpy.array([24, 20, 12])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([18, 15, 9])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10,
"2010": numpy.array([16, 15, 13])},
"efficient": {
"2009": 20,
"2010": numpy.array([8, 9, 9.1])}},
"competed": {
"baseline": {
"2009": 5,
"2010": numpy.array([8.0, 7.5, 6.5])},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 1.5, 2.6])}}},
"energy": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([24, 20, 12])},
"efficient": {
"2009": 0,
"2010": numpy.array([18, 15, 9])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([12, 10, 6])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"carbon": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([36, 30, 18])},
"efficient": {
"2009": 0,
"2010": numpy.array([24, 20, 12])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([18, 15, 9])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {
"2009": 30, "2010": 30},
"measure": {
"2009": 23,
"2010": numpy.array([22, 22, 21])}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"energy": {
"total": {
"baseline": {
"2009": 46,
"2010": numpy.array([44, 44, 42])},
"efficient": {
"2009": 34.5,
"2010": numpy.array([33, 33, 31.5])}},
"competed": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"carbon": {
"total": {
"baseline": {
"2009": 69,
"2010": numpy.array([66, 66, 63])},
"efficient": {
"2009": 46,
"2010": numpy.array([44, 44, 42])}},
"competed": {
"baseline": {
"2009": 34.5,
"2010": numpy.array([33.0, 33.0, 31.5])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11, 11, 10.5])}},
"competed": {
"baseline": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])},
"efficient": {
"2009": 0,
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": 46,
"2010": numpy.array([44, 44, 42])},
"efficient": {
"2009": 34.5,
"2010": numpy.array([33, 33, 31.5])}},
"competed": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"carbon": {
"total": {
"baseline": {
"2009": 69,
"2010": numpy.array([66, 66, 63])},
"efficient": {
"2009": 46,
"2010": numpy.array([44, 44, 42])}},
"competed": {
"baseline": {
"2009": 34.5,
"2010": numpy.array([33.0, 33.0, 31.5])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
def test_compete_com(self):
"""Test outcomes given sample measures w/ point value inputs."""
# Run measure competition routine on sample measures
self.a_run.compete_com_primary(
self.measures_all, self.overlap_key, self.test_adopt_scheme)
# Run secondary microsegment adjustments on sample measure
self.a_run.secondary_adj(
self.measures_secondary, self.overlap_key_scnd,
self.secnd_adj_key, self.test_adopt_scheme)
# Check updated competed master microsegments for each sample measure
# following competition/secondary microsegment adjustments
for ind, d in enumerate(self.a_run.measures):
self.dict_check(
self.measures_master_msegs_out[ind],
self.a_run.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
def test_compete_com_dist(self):
"""Test outcomes given valid sample measures w/ some array inputs."""
# Run measure competition routine on sample measures
self.a_run_dist.compete_com_primary(
self.measures_all_dist, self.overlap_key, self.test_adopt_scheme)
# Run secondary microsegment adjustments on sample measure
self.a_run_dist.secondary_adj(
self.measures_secondary_dist, self.overlap_key_scnd,
self.secnd_adj_key, self.test_adopt_scheme)
# Check updated competed master microsegments for each sample measure
# following competition/secondary microsegment adjustments
for ind, d in enumerate(self.a_run_dist.measures):
self.dict_check(
self.measures_master_msegs_out_dist[ind],
self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
class NumpyConversionTest(unittest.TestCase, CommonMethods):
    """Test the operation of the 'convert_to_numpy' function.

    Verify that the function converts terminal/leaf node lists in a dict to
    numpy arrays.

    Attributes:
        handyvars (object): Useful variables across the class.
        sample_measure (object): Sample measure data with lists to convert.
    """

    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        base_dir = os.getcwd()
        cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
        # Minimal measure input with leaf-node lists under both adoption
        # schemes; the lists are expected to become numpy arrays
        cls.sample_measure = {
            "market_entry_year": None,
            "market_exit_year": None,
            "markets": {
                "Technical potential": {
                    "key 1": {
                        "nested key 1":
                            [1, 2, 3, 4, 5],
                        "nested key 2": 5},
                    "key 2": 10.8},
                "Max adoption potential": {
                    "key 1": {
                        "nested key 1":
                            [0.5, 0.2, 0.3, 0.4, 0.5],
                        "nested key 2": 2},
                    "key 2": 5.8}}}

    def test_numpy_convert(self):
        """Test for correct function output given valid input."""
        # Instantiate measure; conversion happens during initialization
        measure_instance = run.Measure(self.handyvars, **self.sample_measure)
        # Leaf list must become an ndarray; scalar leaves keep their types
        expected_types = (numpy.ndarray, int, float)
        # Test for correct data types in measure markets attribute
        for adopt_scheme in self.handyvars.adopt_schemes:
            for comp_scheme in ["uncompeted", "competed"]:
                tested_data = \
                    measure_instance.markets[adopt_scheme][comp_scheme]
                tested_values = (
                    tested_data["key 1"]["nested key 1"],
                    tested_data["key 1"]["nested key 2"],
                    tested_data["key 2"])
                # Generator (not list) inside all() avoids building a
                # throwaway list for the membership check
                self.assertTrue(all(
                    isinstance(value, expected) for value, expected
                    in zip(tested_values, expected_types)))
# Offer external code execution (include all lines below this point in all
# test files)
def main():
    """Trigger default behavior of running all test fixtures in the file."""
    # unittest discovers and runs every TestCase defined in this module
    unittest.main()
# Standard entry guard: run the suite only when executed as a script
if __name__ == "__main__":
    main()
| 54.163378
| 81
| 0.291133
| 28,636
| 430,978
| 4.328433
| 0.035235
| 0.088391
| 0.056919
| 0.02614
| 0.912932
| 0.898547
| 0.885542
| 0.870697
| 0.849228
| 0.840233
| 0
| 0.218013
| 0.573164
| 430,978
| 7,956
| 82
| 54.170186
| 0.45578
| 0.043195
| 0
| 0.913708
| 0
| 0
| 0.17494
| 0.000669
| 0
| 0
| 0
| 0
| 0.001728
| 1
| 0.003457
| false
| 0
| 0.000798
| 0
| 0.005584
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
eb1310dd2d1a2dc568fc369bcec39edb4b9db158
| 117
|
py
|
Python
|
code/ch08-outbound-text-messages/db/__all_models.py
|
rcastleman/twilio-and-sendgrid-python-course
|
b1aa8e02725cf9580edb4b0310537fac33a685b0
|
[
"MIT"
] | 29
|
2021-01-08T13:46:35.000Z
|
2022-03-30T04:31:29.000Z
|
code/ch08-outbound-text-messages/db/__all_models.py
|
rcastleman/twilio-and-sendgrid-python-course
|
b1aa8e02725cf9580edb4b0310537fac33a685b0
|
[
"MIT"
] | 2
|
2021-08-29T15:11:23.000Z
|
2022-03-05T00:01:46.000Z
|
code/ch08-outbound-text-messages/db/__all_models.py
|
rcastleman/twilio-and-sendgrid-python-course
|
b1aa8e02725cf9580edb4b0310537fac33a685b0
|
[
"MIT"
] | 30
|
2021-07-02T00:14:58.000Z
|
2022-03-06T00:47:53.000Z
|
# noinspection PyUnresolvedReferences
from db import order
# noinspection PyUnresolvedReferences
from db import user
| 23.4
| 37
| 0.863248
| 12
| 117
| 8.416667
| 0.583333
| 0.673267
| 0.752475
| 0.792079
| 0.910891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119658
| 117
| 4
| 38
| 29.25
| 0.980583
| 0.606838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
eb38cd6c2026cdc2c91f9b5998a41a05edd9faba
| 14,175
|
py
|
Python
|
src/models/operation.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
src/models/operation.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
src/models/operation.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
from .modules import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule
import torch.nn as nn
import collections
class BasicOperation(nn.Sequential):
    """Basic residual operation: two 3x3 convolutions with normalization,
    activation and optional DropBlock regularization between them."""

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        # Intermediate width, shrunk by the bottleneck ratio
        channels = round(out_channels / bottleneck)
        modules = [
            ('conv1', nn.Conv2d(
                in_channels, channels, kernel_size=3, padding=1,
                stride=stride, groups=groups, bias=False)),
            ('norm1', normalization(channels)),
        ]
        if dropblock:
            modules.append(('drop1', DropBlock()))
        modules.append(('act1', activation(inplace=True)))
        modules.append(('conv2', nn.Conv2d(
            channels, out_channels, kernel_size=3, padding=1,
            stride=1, groups=1, bias=False)))
        modules.append(('norm2', normalization(out_channels)))
        if dropblock:
            modules.append(('drop2', DropBlock()))
        super().__init__(collections.OrderedDict(modules))
class BottleneckOperation(nn.Sequential):
    """Bottleneck residual operation: 1x1 reduce, grouped 3x3, 1x1 expand,
    each followed by normalization, optional DropBlock and activation."""

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        # Intermediate width, scaled by groups and shrunk by the bottleneck
        channels = round(out_channels / bottleneck * groups)
        modules = [
            ('conv1', nn.Conv2d(
                in_channels, channels, kernel_size=1, padding=0,
                stride=stride, groups=1, bias=False)),
            ('norm1', normalization(channels)),
        ]
        if dropblock:
            modules.append(('drop1', DropBlock()))
        modules.append(('act1', activation(inplace=True)))
        modules.append(('conv2', nn.Conv2d(
            channels, channels, kernel_size=3, padding=1,
            stride=1, groups=groups, bias=False)))
        modules.append(('norm2', normalization(channels)))
        if dropblock:
            modules.append(('drop2', DropBlock()))
        modules.append(('act2', activation(inplace=True)))
        modules.append(('conv3', nn.Conv2d(
            channels, out_channels, kernel_size=1, padding=0,
            stride=1, groups=1, bias=False)))
        modules.append(('norm3', normalization(out_channels)))
        if dropblock:
            modules.append(('drop3', DropBlock()))
        super().__init__(collections.OrderedDict(modules))
class SelectedKernelOperation(nn.Sequential):
    """Bottleneck operation whose 3x3 stage is a selective-kernel conv.

    Identical layout to BottleneckOperation except that the middle stage is
    an ``SKConv2d`` parameterized by ``radix``.
    """

    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck * groups)
        stages = [
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=1, padding=0,
                stride=stride, groups=1, bias=False)),
            ('norm1', normalization(width)),
            ('drop1', DropBlock() if dropblock else None),
            ('act1', activation(inplace=True)),
            ('conv2', SKConv2d(
                width, width, kernel_size=3, padding=1,
                stride=1, radix=radix, groups=groups)),
            ('norm2', normalization(width)),
            ('drop2', DropBlock() if dropblock else None),
            ('act2', activation(inplace=True)),
            ('conv3', nn.Conv2d(
                width, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class PreActBasicOperation(nn.Sequential):
    """Pre-activation basic operation (norm/act come before each conv).

    Note there is no trailing normalization: the sequence ends at conv2,
    as pre-activation residual blocks normalize at the start instead.
    """

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck)
        stages = [
            ('norm1', normalization(in_channels)),
            ('drop1', DropBlock() if dropblock else None),
            ('act1', activation(inplace=True)),
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=3, padding=1,
                stride=stride, groups=groups, bias=False)),
            ('norm2', normalization(width)),
            ('drop2', DropBlock() if dropblock else None),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(
                width, out_channels, kernel_size=3, padding=1,
                stride=1, groups=1, bias=False)),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class SingleActBasicOperation(nn.Sequential):
    """Basic operation with a single activation between the two convs.

    Unlike BasicOperation there is no act1 after the first conv; norm1 is
    applied to the input before conv1 and a final norm3 follows conv2.
    """

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck)
        stages = [
            ('norm1', normalization(in_channels)),
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=3, padding=1,
                stride=stride, groups=groups, bias=False)),
            ('norm2', normalization(width)),
            ('drop2', DropBlock() if dropblock else None),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(
                width, out_channels, kernel_size=3, padding=1,
                stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class SingleActBottleneckOperation(nn.Sequential):
    """Bottleneck variant with no activation after the first 1x1 conv.

    Striding happens in the grouped 3x3 conv (conv2) rather than conv1.
    """

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck * groups)
        stages = [
            ('norm1', normalization(in_channels)),
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm2', normalization(width)),
            ('drop2', DropBlock() if dropblock else None),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(
                width, width, kernel_size=3, padding=1,
                stride=stride, groups=groups, bias=False)),
            ('norm3', normalization(width)),
            ('drop3', DropBlock() if dropblock else None),
            ('act3', activation(inplace=True)),
            ('conv3', nn.Conv2d(
                width, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm4', normalization(out_channels)),
            ('drop4', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class TweakedBottleneckOperation(nn.Sequential):
    """Tweaked bottleneck: 1x1 -> grouped 3x3 -> BlurPool (stride>1) -> 1x1.

    Downsampling is performed by an anti-aliasing BlurPool2d placed before
    the final projection instead of by a strided conv.

    Bug fix: 'norm3' was previously built with the intermediate width
    (``channels``) instead of ``out_channels``, which broke the forward
    pass whenever the two differed; it now matches conv3's output width,
    consistent with every sibling operation in this module.
    """

    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(
                in_channels, channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(
                channels, channels, kernel_size=3, padding=1,
                stride=1, groups=groups, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
            ('conv3', nn.Conv2d(
                channels, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            # fixed: normalize conv3's output width, not the bottleneck width
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))
class TweakedSlectedKernelOperation(nn.Sequential):
    """Tweaked operation using a selective-kernel conv with BlurPool striding.

    NOTE: the class name's misspelling ("Slected") is kept because it is the
    public interface.
    NOTE(review): unlike SelectedKernelOperation there is no 'norm2' after
    the SKConv2d stage -- presumably SKConv2d normalizes internally; confirm
    before "fixing".
    """

    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck)
        stages = [
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm1', normalization(width)),
            ('drop1', DropBlock() if dropblock else None),
            ('act1', activation(inplace=True)),
            ('conv2', SKConv2d(
                width, width, kernel_size=3, padding=1,
                stride=1, radix=radix, groups=groups)),
            ('drop2', DropBlock() if dropblock else None),
            ('pool', BlurPool2d(width, stride=stride) if stride != 1 else None),
            ('conv3', nn.Conv2d(
                width, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class MobileNetOperation(nn.Sequential):
    """MobileNet-style inverted residual: optional 1x1 expansion, depthwise
    conv, optional squeeze-excite, then a linear 1x1 projection.

    The expansion stage is omitted entirely when ``expansion`` leaves the
    channel count unchanged; DropBlock and SE layers are inserted only when
    requested via ``dropblock`` / ``seoperation``.
    """

    def __init__(self, in_channels, out_channels, kernel, stride, expansion,
                 normalization, activation, dropblock,
                 seoperation, sereduction, sesigmoid, **kwargs):
        expanded = int(in_channels * expansion)
        stages = []
        if expanded != in_channels:
            stages += [
                ('conv1', nn.Conv2d(
                    in_channels, expanded, kernel_size=1, padding=0,
                    stride=1, groups=1, bias=False)),
                ('norm1', normalization(expanded)),
                ('drop1', DropBlock() if dropblock else None),
                ('act1', activation(inplace=True)),
            ]
        stages += [
            # groups == channels makes conv2 a depthwise convolution
            ('conv2', nn.Conv2d(
                expanded, expanded, kernel_size=kernel, padding=kernel // 2,
                stride=stride, groups=expanded, bias=False)),
            ('norm2', normalization(expanded)),
            ('drop2', DropBlock() if dropblock else None),
            ('semodule', SEModule(
                expanded, reduction=sereduction, activation=nn.ReLU,
                sigmoid=sesigmoid) if seoperation else None),
            ('act2', activation(inplace=True)),
            ('conv3', nn.Conv2d(
                expanded, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class SplitAttentionOperation(nn.Sequential):
    """ResNeSt-style operation: grouped radix conv plus split attention.

    conv2 widens to ``width * radix`` channels; the SplitAttentionModule
    collapses them back to ``width``. Downsampling (stride > 1) is done by
    average pooling before the final 1x1 projection.
    """

    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        width = round(out_channels / bottleneck)
        stages = [
            ('conv1', nn.Conv2d(
                in_channels, width, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm1', normalization(width)),
            ('drop1', DropBlock() if dropblock else None),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(
                width, width * radix, kernel_size=3, padding=1,
                stride=1, groups=groups * radix, bias=False)),
            ('norm2', normalization(width * radix)),
            ('drop2', DropBlock() if dropblock else None),
            ('act2', activation(inplace=True)),
            ('attention', SplitAttentionModule(
                width, radix=radix, groups=groups,
                normalization=normalization, activation=activation)),
            ('downsample', nn.AvgPool2d(
                kernel_size=3, stride=stride, padding=1) if stride != 1 else None),
            ('conv3', nn.Conv2d(
                width, out_channels, kernel_size=1, padding=0,
                stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', DropBlock() if dropblock else None),
        ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
class DenseNetOperation(nn.Sequential):
    """DenseNet operation: transition layer when stride != 1, dense layer
    otherwise.

    Transition: norm -> act -> 1x1 conv -> 2x2 average pool.
    Dense layer: pre-activated 1x1 expansion to ``growth * expansion``
    channels followed by a 3x3 conv producing ``growth`` channels.
    """

    def __init__(self, in_channels, out_channels, stride, growth, expansion,
                 normalization, activation, dropblock, **kwargs):
        if stride != 1:
            stages = [
                ('norm1', normalization(in_channels)),
                ('act1', activation(inplace=True)),
                ('conv1', nn.Conv2d(
                    in_channels, out_channels, kernel_size=1, padding=0,
                    stride=1, groups=1, bias=False)),
                ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),
            ]
        else:
            width = growth * expansion
            stages = [
                ('norm1', normalization(in_channels)),
                ('drop1', DropBlock() if dropblock else None),
                ('act1', activation(inplace=True)),
                ('conv1', nn.Conv2d(
                    in_channels, width, kernel_size=1, padding=0,
                    stride=1, groups=1, bias=False)),
                ('norm2', normalization(width)),
                ('drop2', DropBlock() if dropblock else None),
                ('act2', activation(inplace=True)),
                ('conv2', nn.Conv2d(
                    width, growth, kernel_size=3, padding=1,
                    stride=1, bias=False)),
            ]
        super().__init__(collections.OrderedDict(
            (key, module) for key, module in stages if module is not None))
| 46.323529
| 90
| 0.559859
| 1,418
| 14,175
| 5.460508
| 0.062764
| 0.055405
| 0.03487
| 0.067416
| 0.868526
| 0.861036
| 0.859357
| 0.843472
| 0.83456
| 0.814155
| 0
| 0.027534
| 0.315908
| 14,175
| 305
| 91
| 46.47541
| 0.77096
| 0
| 0
| 0.826255
| 0
| 0
| 0.041096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042471
| false
| 0
| 0.011583
| 0
| 0.096525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
de607818154794bc7b5d3bac1008f150395fc1f4
| 8,991
|
py
|
Python
|
code/route_betweenness.py
|
tlarock/shipping
|
9d8e608e98824281126248d726783eda8ddf80f5
|
[
"MIT"
] | null | null | null |
code/route_betweenness.py
|
tlarock/shipping
|
9d8e608e98824281126248d726783eda8ddf80f5
|
[
"MIT"
] | null | null | null |
code/route_betweenness.py
|
tlarock/shipping
|
9d8e608e98824281126248d726783eda8ddf80f5
|
[
"MIT"
] | null | null | null |
import numpy as np
import networkx as nx
def route_node_betweenness_from_paths(G, filtered_paths):
    '''
    Computes route betweenness for nodes starting from set of paths filtered_paths.
    Uses G only to enumerate the nodes; could be done by iterating
    over pairs of nodes in filtered_paths or given as input parameter.
    Uses dense numpy arrays for computations.
    '''
    # Index maps: one row per node, one column per (source, target) pair.
    node_to_idx = {node: idx for idx, node in enumerate(G.nodes())}
    pair_to_idx = {pair: idx for idx, pair in enumerate(filtered_paths)}
    counts = np.zeros((len(node_to_idx), len(pair_to_idx)))
    paths_per_pair = np.zeros(len(pair_to_idx))
    for pair, paths in filtered_paths.items():
        col = pair_to_idx[pair]
        paths_per_pair[col] = len(paths)
        for path in paths:
            for node in path:
                counts[node_to_idx[node], col] += 1
    # Fraction of the pair's paths each node lies on, summed over pairs.
    totals = (counts / paths_per_pair).sum(axis=1)
    return {node: totals[idx] for node, idx in node_to_idx.items()}
def route_node_betweenness_from_file(filename):
    '''
    Computes route betweenness for nodes by reading file filename.

    Each line looks like "n1,n2,...,nk|...": a comma-separated path followed
    by optional '|'-separated fields that are ignored. Lines must be grouped
    by (source, target) pair, because a pair is normalized as soon as the
    next pair begins.
    Uses dictionaries for computations.

    Bug fix: in the final-pair handling, the set of nodes to normalize was
    re-created inside the per-path loop, so only nodes of the *last* path
    were normalized. It is now built once per pair, matching the in-loop
    handling. Also guards against an empty input file.
    '''
    pair_counter = 0
    total_pairs = 0
    first = True
    node_to_pair_dict = dict()
    prev_pair = (-1, -1)
    filtered_paths = dict()
    with open(filename, 'r') as fin:
        for line in fin:
            path, *_ = line.strip().split('|')
            path = path.strip().split(',')
            pair = (path[0], path[-1])
            filtered_paths.setdefault(pair, list())
            filtered_paths[pair].append(path)
            if pair != prev_pair and not first:
                # Previous pair is complete: count and normalize its nodes.
                nodes_to_norm = set()
                for path in filtered_paths[prev_pair]:
                    for node in path:
                        node_to_pair_dict.setdefault(node, dict())
                        node_to_pair_dict[node].setdefault(prev_pair, 0)
                        node_to_pair_dict[node][prev_pair] += 1
                        nodes_to_norm.add(node)
                ## Normalize
                for node in nodes_to_norm:
                    node_to_pair_dict[node][prev_pair] /= len(filtered_paths[prev_pair])
                pair_counter += 1
                total_pairs += 1
                if pair_counter == 150_000:
                    print(f"{total_pairs} processed.", flush=True)
                    pair_counter = 0
            prev_pair = pair
            if first: first = False
    ## Handle the last pair (skipped entirely for an empty file)
    if prev_pair in filtered_paths:
        # fixed: build the normalization set once for the whole pair,
        # not per path, otherwise earlier paths' nodes never get normalized
        nodes_to_norm = set()
        for path in filtered_paths[prev_pair]:
            for node in path:
                node_to_pair_dict.setdefault(node, dict())
                node_to_pair_dict[node].setdefault(prev_pair, 0)
                node_to_pair_dict[node][prev_pair] += 1
                nodes_to_norm.add(node)
        ## Normalize
        for node in nodes_to_norm:
            node_to_pair_dict[node][prev_pair] /= len(filtered_paths[prev_pair])
    ## Compute betweenness by summing over all pairs for each node
    route_betweenness = {node: sum(node_to_pair_dict[node].values()) for node in node_to_pair_dict}
    return route_betweenness
def route_edge_betweenness_from_paths(G, filtered_paths):
    '''
    Computes route betweenness for edges starting from set of paths filtered_paths.
    Uses G only to enumerate the edges; could be done by iterating
    over pairs of nodes in filtered_paths or given as input parameter.
    Uses dense numpy arrays for computations.
    '''
    # Index maps: one row per edge, one column per (source, target) pair.
    edge_to_idx = {edge: idx for idx, edge in enumerate(G.edges())}
    pair_to_idx = {pair: idx for idx, pair in enumerate(filtered_paths)}
    counts = np.zeros((len(edge_to_idx), len(pair_to_idx)))
    paths_per_pair = np.zeros(len(pair_to_idx))
    for pair, paths in filtered_paths.items():
        col = pair_to_idx[pair]
        paths_per_pair[col] = len(paths)
        for path in paths:
            # Consecutive node pairs along the path are its edges.
            for u, v in zip(path[:-1], path[1:]):
                counts[edge_to_idx[(u, v)], col] += 1
    totals = (counts / paths_per_pair).sum(axis=1)
    return {edge: totals[idx] for edge, idx in edge_to_idx.items()}
def route_edge_betweenness_from_file(filename):
    '''
    Computes route betweenness for edges by reading file filename.

    Each line looks like "n1,n2,...,nk|...": a comma-separated path followed
    by optional '|'-separated fields that are ignored. Lines must be grouped
    by (source, target) pair, because a pair is normalized as soon as the
    next pair begins.
    Uses dictionaries for computations.

    Bug fix: in the final-pair handling, the set of edges to normalize was
    re-created inside the per-path loop, so only edges of the *last* path
    were normalized. It is now built once per pair, matching the in-loop
    handling. Also guards against an empty input file.
    '''
    pair_counter = 0
    total_pairs = 0
    first = True
    edge_to_pair_dict = dict()
    prev_pair = (-1, -1)
    filtered_paths = dict()
    with open(filename, 'r') as fin:
        for line in fin:
            path, *_ = line.strip().split('|')
            path = path.strip().split(',')
            pair = (path[0], path[-1])
            filtered_paths.setdefault(pair, list())
            filtered_paths[pair].append(path)
            if pair != prev_pair and not first:
                # Previous pair is complete: count and normalize its edges.
                edges_to_norm = set()
                for path in filtered_paths[prev_pair]:
                    for i in range(1, len(path)):
                        edge = path[i-1], path[i]
                        edge_to_pair_dict.setdefault(edge, dict())
                        edge_to_pair_dict[edge].setdefault(prev_pair, 0)
                        edge_to_pair_dict[edge][prev_pair] += 1
                        edges_to_norm.add(edge)
                ## Normalize
                for edge in edges_to_norm:
                    edge_to_pair_dict[edge][prev_pair] /= len(filtered_paths[prev_pair])
                pair_counter += 1
                total_pairs += 1
                if pair_counter == 150_000:
                    print(f"{total_pairs} processed.", flush=True)
                    pair_counter = 0
            prev_pair = pair
            if first: first = False
    ## Handle the last pair (skipped entirely for an empty file)
    if prev_pair in filtered_paths:
        # fixed: build the normalization set once for the whole pair,
        # not per path, otherwise earlier paths' edges never get normalized
        edges_to_norm = set()
        for path in filtered_paths[prev_pair]:
            for i in range(1, len(path)):
                edge = path[i-1], path[i]
                edge_to_pair_dict.setdefault(edge, dict())
                edge_to_pair_dict[edge].setdefault(prev_pair, 0)
                edge_to_pair_dict[edge][prev_pair] += 1
                edges_to_norm.add(edge)
        ## Normalize
        for edge in edges_to_norm:
            edge_to_pair_dict[edge][prev_pair] /= len(filtered_paths[prev_pair])
    ## Compute betweenness by summing over all pairs for each edge
    route_betweenness = {edge: sum(edge_to_pair_dict[edge].values()) for edge in edge_to_pair_dict}
    return route_betweenness
def route_path_betweenness_from_file(filename, k):
    '''
    Computes route betweenness for length-k subpaths by reading file filename.

    Each line looks like "n1,n2,...,nk|...": a comma-separated path followed
    by optional '|'-separated fields that are ignored. Lines must be grouped
    by (source, target) pair, because a pair is normalized as soon as the
    next pair begins. Each (k+1)-node window of a path is one subpath.
    Uses dictionaries for computations.

    Bug fix: in the final-pair handling, the set of subpaths to normalize
    was re-created inside the per-path loop, so only subpaths of the *last*
    path were normalized. It is now built once per pair, matching the
    in-loop handling. Also guards against an empty input file.
    '''
    pair_counter = 0
    total_pairs = 0
    first = True
    path_to_pair_dict = dict()
    prev_pair = (-1, -1)
    filtered_paths = dict()
    with open(filename, 'r') as fin:
        for line in fin:
            path, *_ = line.strip().split('|')
            path = path.strip().split(',')
            pair = (path[0], path[-1])
            filtered_paths.setdefault(pair, list())
            filtered_paths[pair].append(path)
            if pair != prev_pair and not first:
                # Previous pair is complete: count and normalize its subpaths.
                paths_to_norm = set()
                for path in filtered_paths[prev_pair]:
                    for i in range(0, len(path)-k):
                        kpath = tuple(path[i:i+k+1])
                        path_to_pair_dict.setdefault(kpath, dict())
                        path_to_pair_dict[kpath].setdefault(prev_pair, 0)
                        path_to_pair_dict[kpath][prev_pair] += 1
                        paths_to_norm.add(kpath)
                ## Normalize
                for path in paths_to_norm:
                    path_to_pair_dict[path][prev_pair] /= len(filtered_paths[prev_pair])
                pair_counter += 1
                total_pairs += 1
                if pair_counter == 150_000:
                    print(f"{total_pairs} processed.", flush=True)
                    pair_counter = 0
            prev_pair = pair
            if first: first = False
    ## Handle the last pair (skipped entirely for an empty file)
    if prev_pair in filtered_paths:
        # fixed: build the normalization set once for the whole pair, not per
        # path, otherwise earlier paths' subpaths never get normalized
        paths_to_norm = set()
        for path in filtered_paths[prev_pair]:
            for i in range(0, len(path)-k):
                kpath = tuple(path[i:i+k+1])
                path_to_pair_dict.setdefault(kpath, dict())
                path_to_pair_dict[kpath].setdefault(prev_pair, 0)
                path_to_pair_dict[kpath][prev_pair] += 1
                paths_to_norm.add(kpath)
        ## Normalize
        for path in paths_to_norm:
            path_to_pair_dict[path][prev_pair] /= len(filtered_paths[prev_pair])
    ## Compute betweenness by summing over all pairs for each path
    route_betweenness = {path: sum(path_to_pair_dict[path].values()) for path in path_to_pair_dict}
    return route_betweenness
| 39.091304
| 98
| 0.603715
| 1,184
| 8,991
| 4.33277
| 0.091216
| 0.09883
| 0.064327
| 0.049123
| 0.90117
| 0.87193
| 0.865692
| 0.862183
| 0.805263
| 0.805263
| 0
| 0.0113
| 0.30119
| 8,991
| 229
| 99
| 39.262009
| 0.805189
| 0.138249
| 0
| 0.87037
| 0
| 0
| 0.010648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030864
| false
| 0
| 0.012346
| 0
| 0.074074
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
de742dc5458ebc73e271222d4830c67daf1fed8c
| 2,513
|
py
|
Python
|
datawarehouse/edw_migrations/versions/336bfa51f077_facttablesaddactiveflag.py
|
bcgov/foi-reporting
|
25856ce87b668df964ddd16ac7459fae4aa6a7c5
|
[
"Apache-2.0"
] | null | null | null |
datawarehouse/edw_migrations/versions/336bfa51f077_facttablesaddactiveflag.py
|
bcgov/foi-reporting
|
25856ce87b668df964ddd16ac7459fae4aa6a7c5
|
[
"Apache-2.0"
] | 3
|
2022-01-05T18:01:41.000Z
|
2022-02-08T21:51:32.000Z
|
datawarehouse/edw_migrations/versions/336bfa51f077_facttablesaddactiveflag.py
|
bcgov/foi-reporting
|
25856ce87b668df964ddd16ac7459fae4aa6a7c5
|
[
"Apache-2.0"
] | null | null | null |
"""facttablesaddactiveflag
Revision ID: 336bfa51f077
Revises: 32a5c13c6efd
Create Date: 2022-02-28 19:58:46.104675
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '336bfa51f077'
down_revision = '32a5c13c6efd'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the new fact-table columns.

    factRequestDetails gains 'secondaryusers', 'noofdocdelivered' and
    'activeflag'; factRequestPaymentTransaction gains 'paymentreceiveddate';
    every listed fact table gains a one-character 'activeflag' column.
    """
    op.add_column('factRequestDetails', sa.Column('secondaryusers', sa.VARCHAR(length=3000)))
    op.add_column('factRequestDetails', sa.Column('noofdocdelivered', sa.Integer()))
    op.add_column('factRequestDetails', sa.Column('activeflag', sa.CHAR(length=1)))
    op.add_column('factRequestPaymentTransaction', sa.Column('paymentreceiveddate', sa.DateTime()))
    # The remaining tables only receive the activeflag marker column.
    for table in ('factRequestPaymentTransaction',
                  'factRequestDocumentReviewLog',
                  'factRequestRedactionLayers',
                  'factRequestExtensions',
                  'factRequestDocumentsDetails',
                  'factRequestInvoices',
                  'factRequestRequesters',
                  'factRequestForDocuments'):
        op.add_column(table, sa.Column('activeflag', sa.CHAR(length=1)))
def downgrade():
    """Revert the migration: drop every column added by upgrade().

    Bug fix: the previous implementation was a verbatim copy of upgrade()
    and *re-added* the columns instead of dropping them, so downgrading
    never restored the old schema (and could fail on duplicate columns).
    """
    op.drop_column('factRequestDetails', 'secondaryusers')
    op.drop_column('factRequestDetails', 'noofdocdelivered')
    op.drop_column('factRequestDetails', 'activeflag')
    op.drop_column('factRequestPaymentTransaction', 'paymentreceiveddate')
    for table in ('factRequestPaymentTransaction',
                  'factRequestDocumentReviewLog',
                  'factRequestRedactionLayers',
                  'factRequestExtensions',
                  'factRequestDocumentsDetails',
                  'factRequestInvoices',
                  'factRequestRequesters',
                  'factRequestForDocuments'):
        op.drop_column(table, 'activeflag')
| 53.468085
| 99
| 0.750099
| 299
| 2,513
| 6.214047
| 0.187291
| 0.064586
| 0.142088
| 0.193757
| 0.860065
| 0.860065
| 0.860065
| 0.860065
| 0.860065
| 0.860065
| 0
| 0.03223
| 0.086351
| 2,513
| 46
| 100
| 54.630435
| 0.777003
| 0.060485
| 0
| 0.75
| 0
| 0
| 0.363791
| 0.173396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
def8bcce7771078ec9255e8159c19a588668cf9a
| 235
|
py
|
Python
|
hiddenfigures/__init__.py
|
millcityrunner/carball
|
c94cb8caf2f00e616409d2c22b73475e6268fffa
|
[
"Apache-2.0"
] | null | null | null |
hiddenfigures/__init__.py
|
millcityrunner/carball
|
c94cb8caf2f00e616409d2c22b73475e6268fffa
|
[
"Apache-2.0"
] | null | null | null |
hiddenfigures/__init__.py
|
millcityrunner/carball
|
c94cb8caf2f00e616409d2c22b73475e6268fffa
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the replay decompilation helpers at package level, degrading
# gracefully (the package still imports) when optional dependencies are
# missing.
try:
    from hiddenfigures.decompile_replays import decompile_replay
    from hiddenfigures.decompile_replays import analyze_replay_file
except ModuleNotFoundError as e:
    # Best-effort import: warn on stdout instead of failing the package.
    print("Not importing functions due to missing packages:", e)
| 47
| 67
| 0.821277
| 29
| 235
| 6.482759
| 0.724138
| 0.180851
| 0.276596
| 0.351064
| 0.414894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13617
| 235
| 5
| 68
| 47
| 0.926108
| 0
| 0
| 0
| 0
| 0
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
defa4821fe10db31dd23c7e76e31ba30c228307c
| 9,155
|
py
|
Python
|
Features/hadoop.py
|
Archishman-Ghosh/AI_Assistant
|
5510ae1852c7bbe41e29123b20fea6ac6aff8e2f
|
[
"MIT"
] | 1
|
2021-02-01T05:08:06.000Z
|
2021-02-01T05:08:06.000Z
|
Features/hadoop.py
|
Archishman-Ghosh/AI_Assistant
|
5510ae1852c7bbe41e29123b20fea6ac6aff8e2f
|
[
"MIT"
] | null | null | null |
Features/hadoop.py
|
Archishman-Ghosh/AI_Assistant
|
5510ae1852c7bbe41e29123b20fea6ac6aff8e2f
|
[
"MIT"
] | 2
|
2020-12-06T06:02:37.000Z
|
2021-02-01T05:07:51.000Z
|
import os
import subprocess as sp
def namenode_configuration(namenode_ip, namenode_directory, namenode_password, host):
    """
    this function configures the namenode on local host and remote system
    if host = 0 --> local configure
    if host = 1 --> remote configure
    :param namenode_ip: string ip of namenode
    :param namenode_directory: string path to namenode directory
    :param namenode_password: string namenode password (root password used by sshpass)
    :param host: integer value (0 or 1)
    :return: None (shell commands are executed for their side effects)
    """
    # SECURITY NOTE(review): the remote branch puts the root password on the
    # sshpass command line (visible in the process list) and interpolates
    # unvalidated values directly into shell strings (injection risk).
    # Flagged only; behavior unchanged. Consider key-based ssh auth.
    if host == 0:
        # Local setup: create the name-dir, then overwrite the HDFS config
        # files in /etc/hadoop with minimal single-property XML documents.
        os.system("sudo mkdir /{}".format(namenode_directory))
        f1 = open("/etc/hadoop/hdfs-site.xml", "w")
        f1.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> \n\n\n<!-- Put site-specific property overrides in this file. -->\n\n\n<configuration>\n\n<property>\n<name>dfs.name.dir</name>\n<value>/{}</value>\n</property>\n\n</configuration>'.format(
                namenode_directory))
        f1.close()
        f2 = open("/etc/hadoop/core-site.xml", "w")
        f2.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n\n\n<!-- Put site-specific property overrides in this file. -->\n\n<configuration>\n\n<property>\n<name>fs.default.name</name>\n<value>hdfs://{}:9001</value>\n</property>\n\n</configuration>'.format(
                namenode_ip))
        f2.close()
        # Format the namenode, auto-answering the confirmation prompt.
        os.system("sudo echo Y | hadoop namenode -format")
    else:
        # Remote setup: create the directory over ssh, write the config files
        # locally under /root, then scp them to the remote /etc/hadoop.
        os.system('sshpass -p "{}" sudo ssh root@{} "sudo mkdir /root/{}"'.format(namenode_password, namenode_ip,
                                                                                  namenode_directory))
        os.system("sudo touch /root/hdfs-site.xml")
        f1 = open("/root/hdfs-site.xml", "w")
        f1.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> \n\n\n<!-- Put site-specific property overrides in this file. -->\n\n\n<configuration>\n\n<property>\n<name>dfs.name.dir</name>\n<value>/{}</value>\n</property>\n\n</configuration>'.format(
                namenode_directory))
        f1.close()
        os.system("sudo touch /root/core-site.xml")
        f2 = open("/root/core-site.xml", "w")
        f2.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n\n\n<!-- Put site-specific property overrides in this file. -->\n\n<configuration>\n\n<property>\n<name>fs.default.name</name>\n<value>hdfs://{}:9001</value>\n</property>\n\n</configuration>'.format(
                namenode_ip))
        f2.close()
        os.system('sshpass -p "{}" sudo scp /root/hdfs-site.xml {}:/etc/hadoop'.format(namenode_password, namenode_ip))
        os.system('sshpass -p "{}" sudo scp /root/core-site.xml {}:/etc/hadoop'.format(namenode_password, namenode_ip))
        # Format the remote namenode non-interactively.
        os.system(
            "sshpass -p '{}' sudo ssh root@{} sudo hadoop namenode -format -y".format(namenode_password, namenode_ip))
def datanode_configuration(namenode_ip, datanode_ip, datanode_directory, datanode_password, host):
    """
    this function configures the datanode on local host and remote system
    if host = 0 --> local configure
    if host = 1 --> remote configure
    :param namenode_ip: string datatype namenode ip
    :param datanode_ip: string datatype datanode ip
    :param datanode_directory: string datatype datanode directory path
    :param datanode_password: string password of datanode system (root password used by sshpass)
    :param host: integer value (0 or 1)
    :return: None (shell commands are executed for their side effects)
    """
    # SECURITY NOTE(review): same issues as namenode_configuration -- password
    # on the sshpass command line and unescaped interpolation into shell
    # strings. Flagged only; behavior unchanged.
    if host == 0:
        # Local setup: create the data-dir, then overwrite the HDFS config
        # files in /etc/hadoop, pointing fs.default.name at the namenode.
        os.system("sudo mkdir /{}".format(datanode_directory))
        f1 = open("/etc/hadoop/hdfs-site.xml", "w")
        f1.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> \n\n\n<!-- Put site-specific property overrides in this file. -->\n\n\n<configuration>\n\n<property>\n<name>dfs.data.dir</name>\n<value>/{}</value>\n</property>\n\n</configuration>'.format(
                datanode_directory))
        f1.close()
        f2 = open("/etc/hadoop/core-site.xml", "w")
        f2.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n\n\n<!-- Put site-specific property overrides in this file. -->\n\n<configuration>\n\n<property>\n<name>fs.default.name</name>\n<value>hdfs://{}:9001</value>\n</property>\n\n</configuration>'.format(
                namenode_ip))
        f2.close()
    else:
        # Remote setup: create the directory over ssh, write the config files
        # locally, then scp them to the remote /etc/hadoop.
        os.system('sshpass -p "{}" sudo ssh root@{} "sudo mkdir /{}"'.format(datanode_password, datanode_ip,
                                                                             datanode_directory))
        os.system("sudo touch /root/hdfs-site.xml")
        f1 = open("/root/hdfs-site.xml", "w")
        f1.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> \n\n\n<!-- Put site-specific property overrides in this file. -->\n\n\n<configuration>\n\n<property>\n<name>dfs.data.dir</name>\n<value>/{}</value>\n</property>\n\n</configuration>'.format(
                datanode_directory))
        f1.close()
        os.system("sudo touch /root/core-site.xml")
        f2 = open("/root/core-site.xml", "w")
        f2.write(
            '<?xml version="1.0"?>\n<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n\n\n<!-- Put site-specific property overrides in this file. -->\n\n<configuration>\n\n<property>\n<name>fs.default.name</name>\n<value>hdfs://{}:9001</value>\n</property>\n\n</configuration>'.format(
                namenode_ip))
        f2.close()
        # NOTE(review): unlike the namenode variant these scp sources are
        # relative paths, not /root/... -- presumably relies on the CWD;
        # confirm before changing.
        os.system('sshpass -p "{}" sudo scp hdfs-site.xml {}:/etc/hadoop'.format(datanode_password, datanode_ip))
        os.system('sshpass -p "{}" sudo scp core-site.xml {}:/etc/hadoop'.format(datanode_password, datanode_ip))
def namenode_start(namenode_ip, namenode_password, host):
    """
    Start the namenode daemon.
    if host = 0 --> local start
    if host = 1 --> remote start (sshpass + ssh)
    :param namenode_ip: string datatype namenode ip
    :param namenode_password: string datatype namenode password
    :param host: integer value (0 or 1)
    :return: (exit_status, output_text) tuple from getstatusoutput
    """
    if host == 0:
        return sp.getstatusoutput("sudo hadoop-daemon.sh start namenode")
    remote_command = 'sshpass -p "{}" sudo ssh root@{} "sudo hadoop-daemon.sh start namenode"'.format(namenode_password,
                                                                                                     namenode_ip)
    return sp.getstatusoutput(remote_command)
def datanode_start(datanode_ip, datanode_password, host):
    """
    Start the datanode daemon, locally (host == 0) or over ssh (host == 1).
    :param datanode_ip: string datatype datanode ip
    :param datanode_password: string datatype datanode password
    :param host: integer value (0 or 1)
    :return: (exit_status, output_text) tuple from getstatusoutput
    """
    if host == 0:
        return sp.getstatusoutput("sudo hadoop-daemon.sh start datanode")
    remote_command = 'sshpass -p "{}" sudo ssh root@{} "sudo hadoop-daemon.sh start datanode"'.format(datanode_password,
                                                                                                     datanode_ip)
    return sp.getstatusoutput(remote_command)
def namenode_stop(namenode_ip, namenode_password, host):
    """
    Stop the name node daemon.
    if host = 0 --> local name node stop
    if host = 1 --> remote name node stop (sshpass + ssh)
    :param namenode_ip: string datatype namenode ip
    :param namenode_password: string datatype namenode password
    :param host: integer value (0 or 1)
    :return: (exit_status, output_text) tuple from getstatusoutput
    """
    if host == 0:
        return sp.getstatusoutput("sudo hadoop-daemon.sh stop namenode")
    remote_command = 'sshpass -p "{}" sudo ssh root@{} "sudo hadoop-daemon.sh stop namenode"'.format(namenode_password,
                                                                                                    namenode_ip)
    return sp.getstatusoutput(remote_command)
def datanode_stop(datanode_ip, datanode_password, host):
    """
    Stop the data node daemon.
    if host = 0 --> local data node stop
    if host = 1 --> remote data node stop (sshpass + ssh)
    :param datanode_ip: string datatype data node ip
    :param datanode_password: string datatype data node password
    :param host: integer value (0 or 1)
    :return: (exit_status, output_text) tuple from getstatusoutput
    """
    if host == 0:
        return sp.getstatusoutput("sudo hadoop-daemon.sh stop datanode")
    remote_command = 'sshpass -p "{}" sudo ssh root@{} "sudo hadoop-daemon.sh stop datanode"'.format(datanode_password,
                                                                                                    datanode_ip)
    return sp.getstatusoutput(remote_command)
def check_report():
    """
    Report the status of the hadoop cluster.
    :return: (exit_status, output_text) tuple from `hadoop dfsadmin -report`
    """
    return sp.getstatusoutput("sudo hadoop dfsadmin -report")
| 50.302198
| 296
| 0.608411
| 1,166
| 9,155
| 4.716124
| 0.08319
| 0.016003
| 0.043644
| 0.023277
| 0.872886
| 0.839789
| 0.790871
| 0.753046
| 0.747772
| 0.691398
| 0
| 0.012215
| 0.248826
| 9,155
| 181
| 297
| 50.580111
| 0.787407
| 0.210049
| 0
| 0.721154
| 0
| 0.076923
| 0.48179
| 0.229997
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067308
| false
| 0.163462
| 0.019231
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
72111f717911ce0301dac0771d58a2d77bf255f1
| 1,923
|
py
|
Python
|
Paper_Specific_Versions/2019_DTI/Code/10-ADNI_classification_feature_selection.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
Paper_Specific_Versions/2019_DTI/Code/10-ADNI_classification_feature_selection.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
Paper_Specific_Versions/2019_DTI/Code/10-ADNI_classification_feature_selection.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
from clinica_ml_dwi.mlworkflow_dwi_utils import run_voxel_based_classification
# ########################
# ### Original classification
# ########################
caps_directory= 'PATH/TO/CAPS'
output_dir = 'PATH/TO/CLASSIFICATION_OUTPUT'
diagnoses_tsv = 'PATH/TO/DIAGONISIS_TSV'
subjects_visits_tsv = 'PATH/TO/DIAGONISIS_TSV'
n_threads = 72
n_iterations = 250
test_size = 0.2
grid_search_folds = 10
tissue_type=['GM_WM']
task='AD_vs_CN_VB'
# ########################
# ### ANOVA feature selection
# ########################
# Run all four configurations in the same order as before:
# nested ANOVA, nested SVM-RFE, non-nested ANOVA, non-nested SVM-RFE.
for nested in (True, False):
    for feature_selection_method in ('ANOVA', 'RFE'):
        # Nested vs. non-nested selection is toggled via mutually exclusive
        # keyword flags of run_voxel_based_classification.
        selection_mode = (
            {'feature_selection_nested': True} if nested
            else {'feature_selection_non_nested': True}
        )
        run_voxel_based_classification(
            caps_directory, diagnoses_tsv, subjects_visits_tsv, output_dir,
            task, n_threads, n_iterations, test_size, grid_search_folds,
            tissue_type=tissue_type,
            feature_selection_method=feature_selection_method,
            **selection_mode)
| 48.075
| 203
| 0.75455
| 242
| 1,923
| 5.495868
| 0.223141
| 0.204511
| 0.198496
| 0.101504
| 0.798496
| 0.765414
| 0.765414
| 0.765414
| 0.765414
| 0.765414
| 0
| 0.00536
| 0.126885
| 1,923
| 40
| 204
| 48.075
| 0.786778
| 0.059802
| 0
| 0.521739
| 0
| 0
| 0.069231
| 0.043195
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
722e8cdcdb23d289453fecd05b928fe11b76cb3c
| 679
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/ecs/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/ecs/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/ecs/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from typing import List
from botocore.waiter import Waiter
class ServicesInactive(Waiter):
    """Type-annotation stub for the ECS ``services_inactive`` waiter."""
    def wait(self, services: List, cluster: str = None, include: List = None, WaiterConfig: Dict = None):
        # Signature-only stub: the actual polling is implemented by botocore.
        pass
class ServicesStable(Waiter):
    """Type-annotation stub for the ECS ``services_stable`` waiter."""
    def wait(self, services: List, cluster: str = None, include: List = None, WaiterConfig: Dict = None):
        # Signature-only stub: the actual polling is implemented by botocore.
        pass
class TasksRunning(Waiter):
    """Type-annotation stub for the ECS ``tasks_running`` waiter."""
    def wait(self, tasks: List, cluster: str = None, include: List = None, WaiterConfig: Dict = None):
        # Signature-only stub: the actual polling is implemented by botocore.
        pass
class TasksStopped(Waiter):
    """Type-annotation stub for the ECS ``tasks_stopped`` waiter."""
    def wait(self, tasks: List, cluster: str = None, include: List = None, WaiterConfig: Dict = None):
        # Signature-only stub: the actual polling is implemented by botocore.
        pass
| 28.291667
| 105
| 0.686303
| 85
| 679
| 5.482353
| 0.270588
| 0.077253
| 0.111588
| 0.145923
| 0.723176
| 0.723176
| 0.723176
| 0.723176
| 0.723176
| 0.723176
| 0
| 0
| 0.212077
| 679
| 23
| 106
| 29.521739
| 0.871028
| 0
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.266667
| 0.2
| 0
| 0.733333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
72447827cb919ab2b32326c0bcb9ab5ec7998e7a
| 165
|
py
|
Python
|
test_utils.py
|
alan125999/alien-invasion
|
12c6b0bff7907863f6efe824ab9df537cf016a25
|
[
"MIT"
] | null | null | null |
test_utils.py
|
alan125999/alien-invasion
|
12c6b0bff7907863f6efe824ab9df537cf016a25
|
[
"MIT"
] | null | null | null |
test_utils.py
|
alan125999/alien-invasion
|
12c6b0bff7907863f6efe824ab9df537cf016a25
|
[
"MIT"
] | null | null | null |
from utils import hide_mouse_cursor
import pygame
# Test Utils
def test_hide_mouse_cursor():
    """hide_mouse_cursor() should leave the pygame mouse cursor invisible."""
    hide_mouse_cursor()
    # Idiom fix: `get_visible()` may return an int (0/1); test truthiness
    # rather than comparing with `== False`.
    assert not pygame.mouse.get_visible()
| 20.625
| 46
| 0.775758
| 24
| 165
| 5
| 0.541667
| 0.225
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 165
| 8
| 46
| 20.625
| 0.857143
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a0cf825ea2b599801da33a9be93b6bec2d322cb5
| 10,269
|
py
|
Python
|
e2efold_rt/e2efold/evaluation.py
|
Lucmon/TopologyDetect
|
79607f3ce39a1ee6ded41b2500629065cf1cfe51
|
[
"Apache-2.0"
] | 64
|
2020-02-18T00:49:05.000Z
|
2022-03-25T01:52:29.000Z
|
e2efold_rt/e2efold/evaluation.py
|
Lucmon/TopologyDetect
|
79607f3ce39a1ee6ded41b2500629065cf1cfe51
|
[
"Apache-2.0"
] | 11
|
2020-02-28T11:45:40.000Z
|
2022-03-24T05:41:17.000Z
|
e2efold_rt/e2efold/evaluation.py
|
Lucmon/TopologyDetect
|
79607f3ce39a1ee6ded41b2500629065cf1cfe51
|
[
"Apache-2.0"
] | 17
|
2020-02-19T03:10:25.000Z
|
2021-12-23T07:57:17.000Z
|
import torch
from e2efold.common.utils import *
from e2efold.postprocess import postprocess
import _pickle as pickle
# randomly select one sample from the test set and perform the evaluation
def model_eval(val_generator, contact_net, lag_pp_net, device):
    """Evaluate one batch from *val_generator* and print exact and shifted
    precision/recall/F1 averaged over the batch.

    contact_net predicts contact scores; lag_pp_net post-processes them into
    the final binary contact map (thresholded at 0.5).
    Prints results only; returns None.
    """
    contact_net.eval()
    lag_pp_net.eval()
    contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(val_generator))
    contacts_batch = torch.Tensor(contacts.float()).to(device)
    seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
    # NOTE(review): matrix_reps_batch is computed but never used below —
    # confirm before removing.
    matrix_reps_batch = torch.unsqueeze(
        torch.Tensor(matrix_reps.float()).to(device), -1)
    # padding the states for supervised training with all 0s
    state_pad = torch.zeros(contacts.shape).to(device)
    PE_batch = get_pe(seq_lens, contacts.shape[-1]).float().to(device)
    with torch.no_grad():
        pred_contacts = contact_net(PE_batch,
            seq_embedding_batch, state_pad)
        a_pred_list = lag_pp_net(pred_contacts, seq_embedding_batch)
    # Final prediction = last post-processing iterate, binarized at 0.5.
    final_pred = (a_pred_list[-1].cpu()>0.5).float()
    result_tuple_list = list(map(lambda i: evaluate_exact(final_pred.cpu()[i],
        contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
    exact_p,exact_r,exact_f1 = zip(*result_tuple_list)
    print('Average testing precision: ', np.average(exact_p))
    print('Average testing recall score: ', np.average(exact_r))
    print('Average testing f1 score: ', np.average(exact_f1))
    # Shift-tolerant variant of the same per-sample evaluation.
    result_tuple_list_shift = list(map(lambda i: evaluate_shifted(final_pred.cpu()[i],
        contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
    shift_p,shift_r,shift_f1 = zip(*result_tuple_list_shift)
    print('Average testing precision allow shift: ', np.average(shift_p))
    print('Average testing recall score allow shift: ', np.average(shift_r))
    print('Average testing f1 score allow shift: ', np.average(shift_f1))
def model_eval_all_test(test_generator, contact_net, lag_pp_net, device):
    """Evaluate the whole test set, comparing learned post-processing
    (lag_pp_net) against zero-parameter post-processing (postprocess).

    Prints averaged exact and shifted precision/recall/F1 for both variants
    and assembles per-sample metrics into result_dict (the pickle dump at the
    end is commented out, so result_dict is currently built but discarded).
    Returns None.
    """
    contact_net.eval()
    lag_pp_net.eval()
    result_no_train = list()
    result_no_train_shift = list()
    result_pp = list()
    result_pp_shift = list()
    f1_no_train = list()
    f1_pp = list()
    seq_lens_list = list()
    batch_n = 0
    for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:
        if batch_n %10==0:
            print('Batch number: ', batch_n)
        batch_n += 1
        contacts_batch = torch.Tensor(contacts.float()).to(device)
        seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
        # NOTE(review): matrix_reps_batch is built but never used — confirm
        # before removing.
        matrix_reps_batch = torch.unsqueeze(
            torch.Tensor(matrix_reps.float()).to(device), -1)
        state_pad = torch.zeros(contacts.shape).to(device)
        PE_batch = get_pe(seq_lens, contacts.shape[-1]).float().to(device)
        with torch.no_grad():
            pred_contacts = contact_net(PE_batch,
                seq_embedding_batch, state_pad)
            a_pred_list = lag_pp_net(pred_contacts, seq_embedding_batch)
        # only post-processing without learning
        u_no_train = postprocess(pred_contacts,
            seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
        map_no_train = (u_no_train > 0.5).float()
        result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_no_train += result_no_train_tmp
        result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_no_train_shift += result_no_train_tmp_shift
        f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        f1_no_train += f1_no_train_tmp
        # the learning pp result
        final_pred = (a_pred_list[-1].cpu()>0.5).float()
        result_tmp = list(map(lambda i: evaluate_exact(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_pp += result_tmp
        result_tmp_shift = list(map(lambda i: evaluate_shifted(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_pp_shift += result_tmp_shift
        f1_tmp = list(map(lambda i: F1_low_tri(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        f1_pp += f1_tmp
        seq_lens_list += list(seq_lens)
    # Unzip per-sample (precision, recall, f1) tuples into parallel sequences.
    nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)
    nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)
    pp_exact_p,pp_exact_r,pp_exact_f1 = zip(*result_pp)
    pp_shift_p,pp_shift_r,pp_shift_f1 = zip(*result_pp_shift)
    print('Average testing F1 score with learning post-processing: ', np.average(pp_exact_f1))
    print('Average testing F1 score with zero parameter pp: ', np.average(nt_exact_f1))
    print('Average testing F1 score with learning post-processing allow shift: ', np.average(pp_shift_f1))
    print('Average testing F1 score with zero parameter pp allow shift: ', np.average(nt_shift_f1))
    print('Average testing precision with learning post-processing: ', np.average(pp_exact_p))
    print('Average testing precision with zero parameter pp: ', np.average(nt_exact_p))
    print('Average testing precision with learning post-processing allow shift: ', np.average(pp_shift_p))
    print('Average testing precision with zero parameter pp allow shift: ', np.average(nt_shift_p))
    print('Average testing recall with learning post-processing: ', np.average(pp_exact_r))
    print('Average testing recall with zero parameter pp : ', np.average(nt_exact_r))
    print('Average testing recall with learning post-processing allow shift: ', np.average(pp_shift_r))
    print('Average testing recall with zero parameter pp allow shift: ', np.average(nt_shift_r))
    result_dict = dict()
    result_dict['exact_p'] = pp_exact_p
    result_dict['exact_r'] = pp_exact_r
    result_dict['exact_f1'] = pp_exact_f1
    result_dict['shift_p'] = pp_shift_p
    result_dict['shift_r'] = pp_shift_r
    result_dict['shift_f1'] = pp_shift_f1
    result_dict['seq_lens'] = seq_lens_list
    # Length-weighted F1: weight each sample's F1 by its sequence length.
    result_dict['exact_weighted_f1'] = np.sum(np.array(pp_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
    result_dict['shift_weighted_f1'] = np.sum(np.array(pp_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
    # with open('../results/rnastralign_short_e2e_evaluation_dict.pickle', 'wb') as f:
    #     pickle.dump(result_dict, f)
def all_test_only_e2e(test_generator, contact_net, lag_pp_net, device, test_data):
    """Evaluate the whole test set with the learned post-processing only.

    Like model_eval_all_test but skips the zero-parameter postprocess
    baseline, sanitizes NaN metrics with np.nan_to_num, and records the
    per-sample names from test_data.data. The pickle dumps at the end are
    commented out, so result_dict is currently built but discarded.
    Returns None.
    """
    contact_net.eval()
    lag_pp_net.eval()
    # NOTE(review): the *_no_train lists below are never filled or read in
    # this function — confirm before removing.
    result_no_train = list()
    result_no_train_shift = list()
    result_pp = list()
    result_pp_shift = list()
    f1_no_train = list()
    f1_pp = list()
    seq_lens_list = list()
    batch_n = 0
    for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:
        if batch_n %10==0:
            print('Batch number: ', batch_n)
        batch_n += 1
        contacts_batch = torch.Tensor(contacts.float()).to(device)
        seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
        # NOTE(review): matrix_reps_batch is built but never used — confirm
        # before removing.
        matrix_reps_batch = torch.unsqueeze(
            torch.Tensor(matrix_reps.float()).to(device), -1)
        state_pad = torch.zeros(contacts.shape).to(device)
        PE_batch = get_pe(seq_lens, contacts.shape[-1]).float().to(device)
        with torch.no_grad():
            pred_contacts = contact_net(PE_batch,
                seq_embedding_batch, state_pad)
            a_pred_list = lag_pp_net(pred_contacts, seq_embedding_batch)
        # the learning pp result
        final_pred = (a_pred_list[-1].cpu()>0.5).float()
        result_tmp = list(map(lambda i: evaluate_exact(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_pp += result_tmp
        result_tmp_shift = list(map(lambda i: evaluate_shifted(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_pp_shift += result_tmp_shift
        f1_tmp = list(map(lambda i: F1_low_tri(final_pred.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        f1_pp += f1_tmp
        seq_lens_list += list(seq_lens)
    pp_exact_p,pp_exact_r,pp_exact_f1 = zip(*result_pp)
    pp_shift_p,pp_shift_r,pp_shift_f1 = zip(*result_pp_shift)
    # the following is inserted for arxiv ii
    # (NaN metrics — e.g. from empty predictions — are mapped to 0.)
    pp_exact_p = np.nan_to_num(np.array(pp_exact_p))
    pp_exact_r = np.nan_to_num(np.array(pp_exact_r))
    pp_exact_f1 = np.nan_to_num(np.array(pp_exact_f1))
    pp_shift_p = np.nan_to_num(np.array(pp_shift_p))
    pp_shift_r = np.nan_to_num(np.array(pp_shift_r))
    pp_shift_f1 = np.nan_to_num(np.array(pp_shift_f1))
    print('Average testing F1 score with learning post-processing: ', np.average(pp_exact_f1))
    print('Average testing F1 score with learning post-processing allow shift: ', np.average(pp_shift_f1))
    print('Average testing precision with learning post-processing: ', np.average(pp_exact_p))
    print('Average testing precision with learning post-processing allow shift: ', np.average(pp_shift_p))
    print('Average testing recall with learning post-processing: ', np.average(pp_exact_r))
    print('Average testing recall with learning post-processing allow shift: ', np.average(pp_shift_r))
    result_dict = dict()
    result_dict['exact_p'] = pp_exact_p
    result_dict['exact_r'] = pp_exact_r
    result_dict['exact_f1'] = pp_exact_f1
    result_dict['shift_p'] = pp_shift_p
    result_dict['shift_r'] = pp_shift_r
    result_dict['shift_f1'] = pp_shift_f1
    result_dict['seq_lens'] = seq_lens_list
    # Length-weighted F1: weight each sample's F1 by its sequence length.
    result_dict['exact_weighted_f1'] = np.sum(np.array(pp_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
    result_dict['shift_weighted_f1'] = np.sum(np.array(pp_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
    result_dict['name'] = [a.name for a in test_data.data]
    # with open('../results/rnastralign_short_e2e_evaluation_dict.pickle', 'wb') as f:
    #     pickle.dump(result_dict, f)
    # with open('../results/archiveii_short_e2e_evaluation_dict.pickle', 'wb') as f:
    #     pickle.dump(result_dict, f)
| 46.466063
| 114
| 0.698023
| 1,575
| 10,269
| 4.215873
| 0.08381
| 0.033735
| 0.068675
| 0.034337
| 0.890964
| 0.865361
| 0.83509
| 0.822741
| 0.780422
| 0.769729
| 0
| 0.013557
| 0.181128
| 10,269
| 220
| 115
| 46.677273
| 0.776073
| 0.057065
| 0
| 0.746988
| 0
| 0
| 0.152534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018072
| false
| 0
| 0.024096
| 0
| 0.042169
| 0.156627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a0d41ab5f8ea8f87635db8b48a9556cc76f6c29d
| 211
|
py
|
Python
|
nmigen/back/pysim.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 528
|
2020-01-28T18:21:00.000Z
|
2021-12-09T06:27:51.000Z
|
nmigen/back/pysim.py
|
DX-MON/nmigen
|
a6a13dd612ee1c9215719c70a5aa410a8775ffdb
|
[
"BSD-2-Clause"
] | 360
|
2020-01-28T18:34:30.000Z
|
2021-12-10T08:03:32.000Z
|
nmigen/back/pysim.py
|
DX-MON/nmigen
|
a6a13dd612ee1c9215719c70a5aa410a8775ffdb
|
[
"BSD-2-Clause"
] | 100
|
2020-02-06T21:55:46.000Z
|
2021-11-25T19:20:44.000Z
|
# Backwards-compatibility shim: the nmigen project was renamed to Amaranth.
# Re-export the new module's public API (the star import plus its explicit
# __all__) so existing `nmigen.back.pysim` imports keep working, and warn the
# caller to migrate (stacklevel=2 attributes the warning to the importer).
from amaranth.back.pysim import *
from amaranth.back.pysim import __all__
import warnings
warnings.warn("instead of nmigen.back.pysim, use amaranth.back.pysim",
              DeprecationWarning, stacklevel=2)
| 26.375
| 70
| 0.758294
| 27
| 211
| 5.777778
| 0.555556
| 0.230769
| 0.326923
| 0.269231
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005618
| 0.156398
| 211
| 7
| 71
| 30.142857
| 0.870787
| 0
| 0
| 0
| 0
| 0
| 0.251185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a0dfb37409cb18601c6a0c5c43a29a2c29c66d5d
| 92
|
py
|
Python
|
orquestador/nyc_ccci_etl/utils/get_current_ip.py
|
gemathus/dpa-2020
|
b5d7a350b058e77a9b358fa2758632fa2265a9cb
|
[
"IJG"
] | 1
|
2020-04-01T01:12:16.000Z
|
2020-04-01T01:12:16.000Z
|
orquestador/nyc_ccci_etl/utils/get_current_ip.py
|
gemathus/dpa-2020
|
b5d7a350b058e77a9b358fa2758632fa2265a9cb
|
[
"IJG"
] | 3
|
2021-06-02T02:01:07.000Z
|
2022-03-12T00:33:16.000Z
|
orquestador/nyc_ccci_etl/utils/get_current_ip.py
|
dpa-2020-equipo-5/dpa-2020
|
b5d7a350b058e77a9b358fa2758632fa2265a9cb
|
[
"IJG"
] | 3
|
2020-05-11T01:15:14.000Z
|
2021-03-12T02:34:23.000Z
|
from requests import get
def get_current_ip():
    """Return this machine's public IP address as a string.

    Queries the ipify web service. Raises a requests exception on network
    failure or if the service does not respond within the timeout.
    """
    # Robustness: requests.get without a timeout can hang forever if the
    # service is unreachable; 10 s is a generous upper bound.
    return get('https://api.ipify.org', timeout=10).text
| 23
| 44
| 0.728261
| 15
| 92
| 4.333333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 92
| 4
| 44
| 23
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
9d024c6e289a42c53e63b7aef0dd65b3149fc5d3
| 2,184
|
py
|
Python
|
2017/Day1-1.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
2017/Day1-1.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
2017/Day1-1.py
|
josephgruber/AoC
|
8b103ab9f4ac396b7a0ccf04667443a43bef704d
|
[
"MIT"
] | null | null | null |
inputData = '3294199471327195994824832197564859876682638188889768298894243832665654681412886862234525991553276578641265589959178414218389329361496673991614673626344552179413995562266818138372393213966143124914469397692587251112663217862879233226763533911128893354536353213847122251463857894159819828724827969576432191847787772732881266875469721189331882228146576832921314638221317393256471998598117289632684663355273845983933845721713497811766995367795857965222183668765517454263354111134841334631345111596131682726196574763165187889337599583345634413436165539744188866156771585647718555182529936669683581662398618765391487164715724849894563314426959348119286955144439452731762666568741612153254469131724137699832984728937865956711925592628456617133695259554548719328229938621332325125972547181236812263887375866231118312954369432937359357266467383318326239572877314765121844831126178173988799765218913178825966268816476559792947359956859989228917136267178571776316345292573489873792149646548747995389669692188457724414468727192819919448275922166321158141365237545222633688372891451842434458527698774342111482498999383831492577615154591278719656798277377363284379468757998373193231795767644654155432692988651312845433511879457921638934877557575241394363721667237778962455961493559848522582413748218971212486373232795878362964873855994697149692824917183375545192119453587398199912564474614219929345185468661129966379693813498542474732198176496694746111576925715493967296487258237854152382365579876894391815759815373319159213475555251488754279888245492373595471189191353244684697662848376529881512529221627313527441221459672786923145165989611223372241149929436247374818467481641931872972582295425936998535194423916544367799522276914445231582272368388831834437562752119325286474352863554693373718848649568451797751926315617575295381964426843625282819524747119726872193569785611959896776143539915299968276374712996485367853494734376257511273443736433464496287219615697341973131715166768916149828396454638
596713572963686159214116763'
# Advent of Code 2017 Day 1 (part 1): sum every digit that equals the digit
# preceding it, with the sequence treated as circular.  Pairing each digit
# with the string rotated right by one (last character first) encodes the
# same wrap-around the original achieved via Python's negative index at
# position 0; the original also bound an unused `digit` loop variable.
sumTotal = sum(int(cur)
               for prev, cur in zip(inputData[-1:] + inputData[:-1], inputData)
               if cur == prev)
print(sumTotal)
| 242.666667
| 2,028
| 0.977106
| 22
| 2,184
| 97
| 0.590909
| 0.019681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.934198
| 0.011905
| 2,184
| 9
| 2,029
| 242.666667
| 0.05468
| 0
| 0
| 0
| 0
| 0
| 0.921739
| 0.921739
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c20ba944e75a62eaef6d1ad88caf651ad40f606c
| 175
|
py
|
Python
|
sitepackages/csp/tests/__init__.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
sitepackages/csp/tests/__init__.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
sitepackages/csp/tests/__init__.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
from csp.tests.test_decorators import DecoratorTests # noqa
from csp.tests.test_middleware import MiddlewareTests # noqa
from csp.tests.test_utils import UtilsTests # noqa
| 43.75
| 61
| 0.828571
| 24
| 175
| 5.916667
| 0.5
| 0.147887
| 0.253521
| 0.338028
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 175
| 3
| 62
| 58.333333
| 0.922078
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dfce7efed020ef4a65b5c2537d48d2788deac6bd
| 203
|
py
|
Python
|
Application/cdpapp/admin.py
|
Adi1222/Customer-Data-Protection
|
3182c4d8faeb233f55da7be61f6488606e7941b8
|
[
"MIT"
] | 1
|
2021-12-03T11:24:57.000Z
|
2021-12-03T11:24:57.000Z
|
PropelRapp/admin.py
|
Adi1222/PropelR
|
453196cb8ad7c251b650c6bd147a8be5ee8eed50
|
[
"MIT"
] | 1
|
2021-05-01T12:28:38.000Z
|
2021-05-01T12:28:38.000Z
|
PropelRapp/admin.py
|
Adi1222/PropelR
|
453196cb8ad7c251b650c6bd147a8be5ee8eed50
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# NOTE(review): the registrations below are dead code — they live inside a
# triple-quoted string expression, so nothing is actually registered with the
# admin site.  If re-enabled, note Customer is registered twice; the second
# call would presumably raise AlreadyRegistered — confirm before enabling.
'''from .models import Cluster, Camera, Customer
# Register your models here.
admin.site.register(Customer)
admin.site.register(Camera)
admin.site.register(Customer)
'''
| 25.375
| 48
| 0.788177
| 27
| 203
| 5.925926
| 0.481481
| 0.16875
| 0.31875
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093596
| 203
| 8
| 49
| 25.375
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dfe3228a53ef857adbdd6f1ee7d54cae6bdbd815
| 22,363
|
py
|
Python
|
fixtures/fifa.py
|
Abhishek741119/fifa_simulator
|
a9218f272c871863aafdf879c875dda4758725f5
|
[
"MIT"
] | 4
|
2020-11-26T08:54:22.000Z
|
2020-11-28T04:28:08.000Z
|
fixtures/fifa.py
|
Abhishek741119/fifa_simulator
|
a9218f272c871863aafdf879c875dda4758725f5
|
[
"MIT"
] | null | null | null |
fixtures/fifa.py
|
Abhishek741119/fifa_simulator
|
a9218f272c871863aafdf879c875dda4758725f5
|
[
"MIT"
] | null | null | null |
from random import choice as rc
import pandas as pd
from itertools import combinations
def _group_goals(rate):
    """Sample one team's goal tally for a group match.

    Five weighted draws (random.choice over the rate's distribution) are
    summed; higher-rated teams draw from lists with more non-zero entries.
    Distributions are identical to the original inline tables.
    """
    dist_by_rate = {
        4: [1, 2, 0, 1, 0, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0],
        2: [0, 1, 0, 1, 0, 0, 0],
    }
    dist = dist_by_rate.get(rate, [0, 0, 1, 0, 0, 0, 0])
    goals = 0
    for _ in range(5):
        goals += rc(dist)
    return goals


def group_game(df, group, i, j, l, subs):
    """Play two group-stage fixtures for one group and update the standings.

    :param df: overall standings DataFrame indexed by team name (mutated)
    :param group: this group's DataFrame (NAME and RATE columns are read)
    :param i, j: indices into the list of pairwise fixtures to play
    :param l: 4-slot list filled in place with the group's finishing order
    :param subs: group label used only for the printed header
    """
    print("                  GROUP-", subs)
    data = list(group["NAME"])
    # All pairwise fixtures; only slots i and j are played in this call.
    # (The original bound this to a variable named `slice`, shadowing the
    # builtin.)
    fixtures = list(combinations(data, 2))
    for slot in (i, j):
        p, q = fixtures[slot]
        print("************************")
        print("{0} - {1}".format(p, q))
        sp = _group_goals(group.loc[p, "RATE"])
        sq = _group_goals(group.loc[q, "RATE"])
        print("          {0}-{1}".format(sp, sq))
        print("*************************\n")
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        if sp == sq:
            # Group-stage draws stand: one point each.
            df.loc[p, "POINTS"] += 1
            df.loc[q, "POINTS"] += 1
            df.loc[q, "DRAW"] += 1
            df.loc[p, "DRAW"] += 1
        else:
            winner, loser = (p, q) if sp > sq else (q, p)
            margin = abs(sp - sq)
            df.loc[winner, "POINTS"] += 3
            df.loc[winner, "WON"] += 1
            df.loc[loser, "LOST"] += 1
            df.loc[winner, "GD"] += margin
            df.loc[loser, "GD"] -= margin
    # Rank the group's four teams and copy the order into l.
    chart = df.loc[data[0]:data[3]].sort_values(["WON", "POINTS", "GD", "RATE"], ascending=False)
    order = list(chart["NAME"])
    for t in range(4):
        l[t] = order[t]
    print("****************************************************")
    print(chart[["GAMES", "WON", "LOST", "DRAW", "GD", "POINTS"]])
    print("****************************************************")
def _ko_goals(rate, rounds):
    """Sample one team's goal count for a knockout match: *rounds* weighted
    draws (random.choice) over the distribution selected by RATE.
    Distributions are identical to the original inline tables.
    """
    dist_by_rate = {
        4: [1, 2, 0, 0, 1, 0, 1, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0, 0, 0],
        2: [0, 0, 1, 0, 0, 1, 0, 0, 0],
    }
    dist = dist_by_rate.get(rate, [0, 0, 0, 1, 0, 0, 0, 0, 0])
    goals = 0
    for _ in range(rounds):
        goals += rc(dist)
    return goals


def roundof16_game(df, s, l):
    """Simulate the round-of-16 ties and record the results.

    :param df: standings DataFrame indexed by team name (mutated in place)
    :param s: iterable of (team, team) pairings
    :param l: list that receives each winner, in match order

    Bug fix vs. the original: the tie-break block resolved the match inside
    the ``while sp == sq`` loop, so while still level its else-branch
    credited the second team as winner, and a persistent tie awarded points
    and appended winners repeatedly.  The result is now recorded exactly
    once, after the tie is broken.
    """
    for p, q in s:
        print("{0} - {1}".format(p, q))
        # Regulation: five weighted draws per team.
        sp = _ko_goals(df.loc[p, "RATE"], 5)
        sq = _ko_goals(df.loc[q, "RATE"], 5)
        print("         {0}-{1}".format(sp, sq))
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        # Extra time: add three draws per side until the tie breaks.
        while sp == sq:
            sp += _ko_goals(df.loc[p, "RATE"], 3)
            sq += _ko_goals(df.loc[q, "RATE"], 3)
            print("({0},{1})".format(sp, sq))
        winner, loser = (p, q) if sp > sq else (q, p)
        margin = abs(sp - sq)
        df.loc[winner, "POINTS"] += 3
        df.loc[winner, "WON"] += 1
        df.loc[loser, "LOST"] += 1
        df.loc[winner, "GD"] += margin
        df.loc[loser, "GD"] -= margin
        l.append(winner)
        print(winner, "ADVANCED TO QUARTER-FINAL")
        df.loc[loser, "LEVEL"] = 2
        print("*************************")
        print("*************************\n")
def _ko_goals(rate, rounds):
    """Sample one team's goal count for a knockout match: *rounds* weighted
    draws (random.choice) over the distribution selected by RATE.
    Distributions are identical to the original inline tables.
    """
    dist_by_rate = {
        4: [1, 2, 0, 0, 1, 0, 1, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0, 0, 0],
        2: [0, 0, 1, 0, 0, 1, 0, 0, 0],
    }
    dist = dist_by_rate.get(rate, [0, 0, 0, 1, 0, 0, 0, 0, 0])
    goals = 0
    for _ in range(rounds):
        goals += rc(dist)
    return goals


def quarter_game(df, s, l):
    """Simulate the quarter-final ties and record the results.

    :param df: standings DataFrame indexed by team name (mutated in place)
    :param s: iterable of (team, team) pairings
    :param l: list that receives each winner, in match order

    Bug fix vs. the original: the tie-break block resolved the match inside
    the ``while sp == sq`` loop, so while still level its else-branch
    credited the second team as winner, and a persistent tie awarded points
    and appended winners repeatedly.  The result is now recorded exactly
    once, after the tie is broken.
    """
    for p, q in s:
        print("{0} - {1}".format(p, q))
        # Regulation: five weighted draws per team.
        sp = _ko_goals(df.loc[p, "RATE"], 5)
        sq = _ko_goals(df.loc[q, "RATE"], 5)
        print("         {0}-{1}".format(sp, sq))
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        # Extra time: add three draws per side until the tie breaks.
        while sp == sq:
            sp += _ko_goals(df.loc[p, "RATE"], 3)
            sq += _ko_goals(df.loc[q, "RATE"], 3)
            print("({0},{1})".format(sp, sq))
        winner, loser = (p, q) if sp > sq else (q, p)
        margin = abs(sp - sq)
        df.loc[winner, "POINTS"] += 3
        df.loc[winner, "WON"] += 1
        df.loc[loser, "LOST"] += 1
        df.loc[winner, "GD"] += margin
        df.loc[loser, "GD"] -= margin
        l.append(winner)
        print(winner, "ADVANCED TO SEMI-FINAL")
        df.loc[loser, "LEVEL"] = 3
        print("*************************")
        print("*************************")
        print("*************************\n")
def _ko_goals(rate, rounds):
    """Sample one team's goal count for a knockout match: *rounds* weighted
    draws (random.choice) over the distribution selected by RATE.
    Distributions are identical to the original inline tables.
    """
    dist_by_rate = {
        4: [1, 2, 0, 0, 1, 0, 1, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0, 0, 0],
        2: [0, 0, 1, 0, 0, 1, 0, 0, 0],
    }
    dist = dist_by_rate.get(rate, [0, 0, 0, 1, 0, 0, 0, 0, 0])
    goals = 0
    for _ in range(rounds):
        goals += rc(dist)
    return goals


def semis_game(df, s, l, m):
    """Simulate the semi-final ties and record the results.

    :param df: standings DataFrame indexed by team name (mutated in place)
    :param s: iterable of (team, team) pairings
    :param l: list that receives each winner (finalists), in match order
    :param m: list that receives each loser (third-place playoff entrants)

    Bug fix vs. the original: the tie-break block resolved the match inside
    the ``while sp == sq`` loop, so while still level its else-branch
    credited the second team as winner, and a persistent tie awarded points
    and appended teams repeatedly.  The result is now recorded exactly once,
    after the tie is broken.
    """
    for p, q in s:
        print("{0} - {1}".format(p, q))
        # Regulation: five weighted draws per team.
        sp = _ko_goals(df.loc[p, "RATE"], 5)
        sq = _ko_goals(df.loc[q, "RATE"], 5)
        print("         {0}-{1}".format(sp, sq))
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        # Extra time: add three draws per side until the tie breaks.
        while sp == sq:
            sp += _ko_goals(df.loc[p, "RATE"], 3)
            sq += _ko_goals(df.loc[q, "RATE"], 3)
            print("({0},{1})".format(sp, sq))
        winner, loser = (p, q) if sp > sq else (q, p)
        margin = abs(sp - sq)
        df.loc[winner, "POINTS"] += 3
        df.loc[winner, "WON"] += 1
        df.loc[loser, "LOST"] += 1
        df.loc[winner, "GD"] += margin
        df.loc[loser, "GD"] -= margin
        l.append(winner)
        m.append(loser)
        print(winner, "ADVANCED TO FINAL")
        print("*************************")
        print("*************************")
        print("*************************")
        print("*************************\n")
def _ko_goals(rate, rounds):
    """Sample one team's goal count for a knockout match: *rounds* weighted
    draws (random.choice) over the distribution selected by RATE.
    Distributions are identical to the original inline tables.
    """
    dist_by_rate = {
        4: [1, 2, 0, 0, 1, 0, 1, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0, 0, 0],
        2: [0, 0, 1, 0, 0, 1, 0, 0, 0],
    }
    dist = dist_by_rate.get(rate, [0, 0, 0, 1, 0, 0, 0, 0, 0])
    goals = 0
    for _ in range(rounds):
        goals += rc(dist)
    return goals


def thirdplace_playoff(df, s):
    """Simulate the third-place playoff and record the result.

    :param df: standings DataFrame indexed by team name (mutated in place);
        the winner's LEVEL is set to 5, the loser's to 4
    :param s: iterable of (team, team) pairings (normally a single pairing)

    Bug fix vs. the original: the tie-break block resolved the match inside
    the ``while sp == sq`` loop, so while still level its else-branch
    credited the second team as winner, and a persistent tie awarded points
    repeatedly.  The result is now recorded exactly once, after the tie is
    broken.
    """
    for p, q in s:
        print("{0} - {1}".format(p, q))
        # Regulation: five weighted draws per team.
        sp = _ko_goals(df.loc[p, "RATE"], 5)
        sq = _ko_goals(df.loc[q, "RATE"], 5)
        print("         {0}-{1}".format(sp, sq))
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        # Extra time: add three draws per side until the tie breaks.
        while sp == sq:
            sp += _ko_goals(df.loc[p, "RATE"], 3)
            sq += _ko_goals(df.loc[q, "RATE"], 3)
            print("({0},{1})".format(sp, sq))
        winner, loser = (p, q) if sp > sq else (q, p)
        margin = abs(sp - sq)
        df.loc[winner, "POINTS"] += 3
        df.loc[winner, "WON"] += 1
        df.loc[loser, "LOST"] += 1
        df.loc[winner, "GD"] += margin
        df.loc[loser, "GD"] -= margin
        print(winner, "ATTAINED THIRD POSITION")
        df.loc[loser, "LEVEL"] = 4
        df.loc[winner, "LEVEL"] = 5
        print("*************************")
        print("*************************\n")
def final_game(df, s):
    """Simulate the championship final and record the result in `df`.

    Parameters
    ----------
    df : pandas.DataFrame indexed by team name, with columns
         RATE, GAMES, POINTS, WON, LOST, GD and LEVEL (all updated in place).
    s :  iterable of (p, q) team-name pairs to play.

    The champion is announced, awarded 3 points and LEVEL 7; the runner-up
    gets LEVEL 6.  Scoring relies on the module-level `rc` helper, which
    draws a goal count from a weighted chance list.
    """
    # Goal-chance tables keyed by a team's RATE; any other RATE uses the
    # weakest table, matching the original if/elif/else chain.
    chance_by_rate = {
        4: [1, 2, 0, 0, 1, 0, 1, 0, 0],
        3: [1, 0, 1, 0, 1, 0, 0, 0, 0],
        2: [0, 0, 1, 0, 0, 1, 0, 0, 0],
    }
    weakest = [0, 0, 0, 1, 0, 0, 0, 0, 0]

    def goals(team, rounds):
        # One rc() draw per round, summed — replaces the eight copy-pasted
        # RATE dispatch chains of the original.
        chances = chance_by_rate.get(df.loc[team, "RATE"], weakest)
        return sum(rc(chances) for _ in range(rounds))

    # Separator strings reproduced byte-for-byte from the original: the
    # regulation-time announcement closes with a slightly shorter line plus
    # a trailing newline; tie-broken wins close with the standard line.
    sep = "============================================================================================================================================================================"
    sep_regulation_close = "===========================================================================================================================================================================\n"

    for p, q in s:
        print(" {0} - {1}".format(p, q))
        sp = goals(p, 5)
        sq = goals(q, 5)
        print(" {0}-{1}".format(sp, sq))
        df.loc[p, "GAMES"] += 1
        df.loc[q, "GAMES"] += 1
        went_to_tiebreak = sp == sq
        # Tie-break: keep adding 3-round bursts to the running scores until
        # they differ (same cumulative semantics as the original loop).
        while sp == sq:
            sp += goals(p, 3)
            sq += goals(q, 3)
            print("({0},{1})".format(sp, sq))
        winner, loser = (p, q) if sp > sq else (q, p)
        margin = abs(sp - sq)
        df.loc[winner, "POINTS"] += 3
        df.loc[winner, "WON"] += 1
        df.loc[loser, "LOST"] += 1
        df.loc[winner, "GD"] += margin
        df.loc[loser, "GD"] -= margin
        df.loc[loser, "LEVEL"] = 6
        df.loc[winner, "LEVEL"] = 7
        print(sep)
        print(sep)
        print("***** CHAMPIONS:::-----", winner, " *****")
        print(sep if went_to_tiebreak else sep_regulation_close)
| 37.90339
| 198
| 0.289585
| 2,673
| 22,363
| 2.420127
| 0.031425
| 0.163085
| 0.097388
| 0.149637
| 0.925027
| 0.911578
| 0.911578
| 0.910496
| 0.90895
| 0.90895
| 0
| 0.050682
| 0.442383
| 22,363
| 589
| 199
| 37.967742
| 0.468083
| 0
| 0
| 0.938879
| 0
| 0
| 0.202535
| 0.115
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010187
| false
| 0
| 0.005093
| 0
| 0.01528
| 0.112054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a085a8c892833af886ec0b719817b0bdf7ce7ba7
| 95
|
py
|
Python
|
src/sage/dev/all.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/dev/all.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/dev/all.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
# Publish the developer-scripts object `dev` from this module.
# `lazy_import` registers the name without importing
# 'sage.dev.sagedev_instance' eagerly — presumably so the dev machinery is
# only loaded on first use of `dev` (confirm against lazy_import's docs).
from sage.misc.lazy_import import lazy_import
lazy_import('sage.dev.sagedev_instance', 'dev')
| 23.75
| 47
| 0.810526
| 15
| 95
| 4.866667
| 0.533333
| 0.410959
| 0.438356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073684
| 95
| 3
| 48
| 31.666667
| 0.829545
| 0
| 0
| 0
| 0
| 0
| 0.294737
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2683e660c991f8978004c5e53caf98b7869c8112
| 18
|
py
|
Python
|
sample_60181885.py
|
jun0433/sample_60181885
|
85d8187ef429ffd4a00edec2a48111ef25f714a6
|
[
"MIT"
] | null | null | null |
sample_60181885.py
|
jun0433/sample_60181885
|
85d8187ef429ffd4a00edec2a48111ef25f714a6
|
[
"MIT"
] | null | null | null |
sample_60181885.py
|
jun0433/sample_60181885
|
85d8187ef429ffd4a00edec2a48111ef25f714a6
|
[
"MIT"
] | null | null | null |
# Emit the fixed sample identifier (same output as the original one-liner).
SAMPLE_ID = "60181885"
print(SAMPLE_ID)
| 9
| 17
| 0.722222
| 2
| 18
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.470588
| 0.055556
| 18
| 1
| 18
| 18
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
cd0441d91cce6b586a782045ab3e69aaef34b440
| 18,816
|
py
|
Python
|
sdk/python/pulumi_aws_native/xray/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/xray/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/xray/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GroupInsightsConfiguration',
'SamplingRule',
'SamplingRuleRecord',
'SamplingRuleUpdate',
'TagsItemProperties',
]
@pulumi.output_type
class GroupInsightsConfiguration(dict):
    """Insights configuration output type.

    NOTE: this file is generated by the Pulumi SDK Generator — do not edit
    by hand; any manual change will be lost on regeneration.
    """

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase wire key is used for dict access instead
        # of the snake_case property getter.
        suggest = None
        if key == "insightsEnabled":
            suggest = "insights_enabled"
        elif key == "notificationsEnabled":
            suggest = "notifications_enabled"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GroupInsightsConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict-style access still works, but emits the deprecation warning above.
        GroupInsightsConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        GroupInsightsConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 insights_enabled: Optional[bool] = None,
                 notifications_enabled: Optional[bool] = None):
        """
        :param bool insights_enabled: Set the InsightsEnabled value to true to enable insights or false to disable insights.
        :param bool notifications_enabled: Set the NotificationsEnabled value to true to enable insights notifications. Notifications can only be enabled on a group with InsightsEnabled set to true.
        """
        # Only explicitly-provided values are stored; None means "unset".
        if insights_enabled is not None:
            pulumi.set(__self__, "insights_enabled", insights_enabled)
        if notifications_enabled is not None:
            pulumi.set(__self__, "notifications_enabled", notifications_enabled)

    @property
    @pulumi.getter(name="insightsEnabled")
    def insights_enabled(self) -> Optional[bool]:
        """
        Set the InsightsEnabled value to true to enable insights or false to disable insights.
        """
        return pulumi.get(self, "insights_enabled")

    @property
    @pulumi.getter(name="notificationsEnabled")
    def notifications_enabled(self) -> Optional[bool]:
        """
        Set the NotificationsEnabled value to true to enable insights notifications. Notifications can only be enabled on a group with InsightsEnabled set to true.
        """
        return pulumi.get(self, "notifications_enabled")
@pulumi.output_type
class SamplingRule(dict):
    """Sampling rule output type.

    NOTE: this file is generated by the Pulumi SDK Generator — do not edit
    by hand; any manual change will be lost on regeneration.
    """

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase wire key is used for dict access instead
        # of the snake_case property getter.
        suggest = None
        if key == "fixedRate":
            suggest = "fixed_rate"
        elif key == "hTTPMethod":
            suggest = "h_ttp_method"
        elif key == "reservoirSize":
            suggest = "reservoir_size"
        elif key == "resourceARN":
            suggest = "resource_arn"
        elif key == "ruleARN":
            suggest = "rule_arn"
        elif key == "ruleName":
            suggest = "rule_name"
        elif key == "serviceName":
            suggest = "service_name"
        elif key == "serviceType":
            suggest = "service_type"
        elif key == "uRLPath":
            suggest = "u_rl_path"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SamplingRule. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict-style access still works, but emits the deprecation warning above.
        SamplingRule.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SamplingRule.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 attributes: Optional[Any] = None,
                 fixed_rate: Optional[float] = None,
                 h_ttp_method: Optional[str] = None,
                 host: Optional[str] = None,
                 priority: Optional[int] = None,
                 reservoir_size: Optional[int] = None,
                 resource_arn: Optional[str] = None,
                 rule_arn: Optional[str] = None,
                 rule_name: Optional[str] = None,
                 service_name: Optional[str] = None,
                 service_type: Optional[str] = None,
                 u_rl_path: Optional[str] = None,
                 version: Optional[int] = None):
        """
        :param Any attributes: Matches attributes derived from the request.
        :param float fixed_rate: The percentage of matching requests to instrument, after the reservoir is exhausted.
        :param str h_ttp_method: Matches the HTTP method from a request URL.
        :param str host: Matches the hostname from a request URL.
        :param int priority: The priority of the sampling rule.
        :param int reservoir_size: A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.
        :param str resource_arn: Matches the ARN of the AWS resource on which the service runs.
        :param str service_name: Matches the name that the service uses to identify itself in segments.
        :param str service_type: Matches the origin that the service uses to identify its type in segments.
        :param str u_rl_path: Matches the path from a request URL.
        :param int version: The version of the sampling rule format (1)
        """
        # Only explicitly-provided values are stored; None means "unset".
        if attributes is not None:
            pulumi.set(__self__, "attributes", attributes)
        if fixed_rate is not None:
            pulumi.set(__self__, "fixed_rate", fixed_rate)
        if h_ttp_method is not None:
            pulumi.set(__self__, "h_ttp_method", h_ttp_method)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if reservoir_size is not None:
            pulumi.set(__self__, "reservoir_size", reservoir_size)
        if resource_arn is not None:
            pulumi.set(__self__, "resource_arn", resource_arn)
        if rule_arn is not None:
            pulumi.set(__self__, "rule_arn", rule_arn)
        if rule_name is not None:
            pulumi.set(__self__, "rule_name", rule_name)
        if service_name is not None:
            pulumi.set(__self__, "service_name", service_name)
        if service_type is not None:
            pulumi.set(__self__, "service_type", service_type)
        if u_rl_path is not None:
            pulumi.set(__self__, "u_rl_path", u_rl_path)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def attributes(self) -> Optional[Any]:
        """
        Matches attributes derived from the request.
        """
        return pulumi.get(self, "attributes")

    @property
    @pulumi.getter(name="fixedRate")
    def fixed_rate(self) -> Optional[float]:
        """
        The percentage of matching requests to instrument, after the reservoir is exhausted.
        """
        return pulumi.get(self, "fixed_rate")

    @property
    @pulumi.getter(name="hTTPMethod")
    def h_ttp_method(self) -> Optional[str]:
        """
        Matches the HTTP method from a request URL.
        """
        return pulumi.get(self, "h_ttp_method")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """
        Matches the hostname from a request URL.
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        The priority of the sampling rule.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="reservoirSize")
    def reservoir_size(self) -> Optional[int]:
        """
        A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.
        """
        return pulumi.get(self, "reservoir_size")

    @property
    @pulumi.getter(name="resourceARN")
    def resource_arn(self) -> Optional[str]:
        """
        Matches the ARN of the AWS resource on which the service runs.
        """
        return pulumi.get(self, "resource_arn")

    @property
    @pulumi.getter(name="ruleARN")
    def rule_arn(self) -> Optional[str]:
        # Undocumented upstream; returns the stored "rule_arn" value.
        return pulumi.get(self, "rule_arn")

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> Optional[str]:
        # Undocumented upstream; returns the stored "rule_name" value.
        return pulumi.get(self, "rule_name")

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[str]:
        """
        Matches the name that the service uses to identify itself in segments.
        """
        return pulumi.get(self, "service_name")

    @property
    @pulumi.getter(name="serviceType")
    def service_type(self) -> Optional[str]:
        """
        Matches the origin that the service uses to identify its type in segments.
        """
        return pulumi.get(self, "service_type")

    @property
    @pulumi.getter(name="uRLPath")
    def u_rl_path(self) -> Optional[str]:
        """
        Matches the path from a request URL.
        """
        return pulumi.get(self, "u_rl_path")

    @property
    @pulumi.getter
    def version(self) -> Optional[int]:
        """
        The version of the sampling rule format (1)
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class SamplingRuleRecord(dict):
    """Sampling rule record output type (a rule plus its timestamps).

    NOTE: this file is generated by the Pulumi SDK Generator — do not edit
    by hand; any manual change will be lost on regeneration.
    """

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase wire key is used for dict access instead
        # of the snake_case property getter.
        suggest = None
        if key == "createdAt":
            suggest = "created_at"
        elif key == "modifiedAt":
            suggest = "modified_at"
        elif key == "samplingRule":
            suggest = "sampling_rule"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SamplingRuleRecord. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict-style access still works, but emits the deprecation warning above.
        SamplingRuleRecord.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SamplingRuleRecord.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 modified_at: Optional[str] = None,
                 sampling_rule: Optional['outputs.SamplingRule'] = None):
        """
        :param str created_at: When the rule was created, in Unix time seconds.
        :param str modified_at: When the rule was modified, in Unix time seconds.
        """
        # Only explicitly-provided values are stored; None means "unset".
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if modified_at is not None:
            pulumi.set(__self__, "modified_at", modified_at)
        if sampling_rule is not None:
            pulumi.set(__self__, "sampling_rule", sampling_rule)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        When the rule was created, in Unix time seconds.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="modifiedAt")
    def modified_at(self) -> Optional[str]:
        """
        When the rule was modified, in Unix time seconds.
        """
        return pulumi.get(self, "modified_at")

    @property
    @pulumi.getter(name="samplingRule")
    def sampling_rule(self) -> Optional['outputs.SamplingRule']:
        # Undocumented upstream; returns the stored "sampling_rule" value.
        return pulumi.get(self, "sampling_rule")
@pulumi.output_type
class SamplingRuleUpdate(dict):
    """Sampling rule update output type (same fields as SamplingRule, minus `version`).

    NOTE: this file is generated by the Pulumi SDK Generator — do not edit
    by hand; any manual change will be lost on regeneration.
    """

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase wire key is used for dict access instead
        # of the snake_case property getter.
        suggest = None
        if key == "fixedRate":
            suggest = "fixed_rate"
        elif key == "hTTPMethod":
            suggest = "h_ttp_method"
        elif key == "reservoirSize":
            suggest = "reservoir_size"
        elif key == "resourceARN":
            suggest = "resource_arn"
        elif key == "ruleARN":
            suggest = "rule_arn"
        elif key == "ruleName":
            suggest = "rule_name"
        elif key == "serviceName":
            suggest = "service_name"
        elif key == "serviceType":
            suggest = "service_type"
        elif key == "uRLPath":
            suggest = "u_rl_path"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SamplingRuleUpdate. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict-style access still works, but emits the deprecation warning above.
        SamplingRuleUpdate.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SamplingRuleUpdate.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 attributes: Optional[Any] = None,
                 fixed_rate: Optional[float] = None,
                 h_ttp_method: Optional[str] = None,
                 host: Optional[str] = None,
                 priority: Optional[int] = None,
                 reservoir_size: Optional[int] = None,
                 resource_arn: Optional[str] = None,
                 rule_arn: Optional[str] = None,
                 rule_name: Optional[str] = None,
                 service_name: Optional[str] = None,
                 service_type: Optional[str] = None,
                 u_rl_path: Optional[str] = None):
        """
        :param Any attributes: Matches attributes derived from the request.
        :param float fixed_rate: The percentage of matching requests to instrument, after the reservoir is exhausted.
        :param str h_ttp_method: Matches the HTTP method from a request URL.
        :param str host: Matches the hostname from a request URL.
        :param int priority: The priority of the sampling rule.
        :param int reservoir_size: A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.
        :param str resource_arn: Matches the ARN of the AWS resource on which the service runs.
        :param str service_name: Matches the name that the service uses to identify itself in segments.
        :param str service_type: Matches the origin that the service uses to identify its type in segments.
        :param str u_rl_path: Matches the path from a request URL.
        """
        # Only explicitly-provided values are stored; None means "unset".
        if attributes is not None:
            pulumi.set(__self__, "attributes", attributes)
        if fixed_rate is not None:
            pulumi.set(__self__, "fixed_rate", fixed_rate)
        if h_ttp_method is not None:
            pulumi.set(__self__, "h_ttp_method", h_ttp_method)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if reservoir_size is not None:
            pulumi.set(__self__, "reservoir_size", reservoir_size)
        if resource_arn is not None:
            pulumi.set(__self__, "resource_arn", resource_arn)
        if rule_arn is not None:
            pulumi.set(__self__, "rule_arn", rule_arn)
        if rule_name is not None:
            pulumi.set(__self__, "rule_name", rule_name)
        if service_name is not None:
            pulumi.set(__self__, "service_name", service_name)
        if service_type is not None:
            pulumi.set(__self__, "service_type", service_type)
        if u_rl_path is not None:
            pulumi.set(__self__, "u_rl_path", u_rl_path)

    @property
    @pulumi.getter
    def attributes(self) -> Optional[Any]:
        """
        Matches attributes derived from the request.
        """
        return pulumi.get(self, "attributes")

    @property
    @pulumi.getter(name="fixedRate")
    def fixed_rate(self) -> Optional[float]:
        """
        The percentage of matching requests to instrument, after the reservoir is exhausted.
        """
        return pulumi.get(self, "fixed_rate")

    @property
    @pulumi.getter(name="hTTPMethod")
    def h_ttp_method(self) -> Optional[str]:
        """
        Matches the HTTP method from a request URL.
        """
        return pulumi.get(self, "h_ttp_method")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """
        Matches the hostname from a request URL.
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        The priority of the sampling rule.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="reservoirSize")
    def reservoir_size(self) -> Optional[int]:
        """
        A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.
        """
        return pulumi.get(self, "reservoir_size")

    @property
    @pulumi.getter(name="resourceARN")
    def resource_arn(self) -> Optional[str]:
        """
        Matches the ARN of the AWS resource on which the service runs.
        """
        return pulumi.get(self, "resource_arn")

    @property
    @pulumi.getter(name="ruleARN")
    def rule_arn(self) -> Optional[str]:
        # Undocumented upstream; returns the stored "rule_arn" value.
        return pulumi.get(self, "rule_arn")

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> Optional[str]:
        # Undocumented upstream; returns the stored "rule_name" value.
        return pulumi.get(self, "rule_name")

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[str]:
        """
        Matches the name that the service uses to identify itself in segments.
        """
        return pulumi.get(self, "service_name")

    @property
    @pulumi.getter(name="serviceType")
    def service_type(self) -> Optional[str]:
        """
        Matches the origin that the service uses to identify its type in segments.
        """
        return pulumi.get(self, "service_type")

    @property
    @pulumi.getter(name="uRLPath")
    def u_rl_path(self) -> Optional[str]:
        """
        Matches the path from a request URL.
        """
        return pulumi.get(self, "u_rl_path")
@pulumi.output_type
class TagsItemProperties(dict):
    """A single key/value tag entry.

    Generated output type; both fields are required and stored via
    ``pulumi.set``/retrieved via ``pulumi.get``.
    """

    def __init__(__self__, *, key: str, value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The tag key."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """The tag value."""
        return pulumi.get(self, "value")
| 36.678363
| 235
| 0.617878
| 2,228
| 18,816
| 5.013016
| 0.077648
| 0.022562
| 0.037246
| 0.054436
| 0.842779
| 0.832214
| 0.818784
| 0.793983
| 0.781449
| 0.76766
| 0
| 0.000223
| 0.284279
| 18,816
| 512
| 236
| 36.75
| 0.829138
| 0.229804
| 0
| 0.773529
| 1
| 0.011765
| 0.143349
| 0.008497
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144118
| false
| 0
| 0.017647
| 0.020588
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cd5f236ac6ba3563921c41b0487c9111fcce775b
| 3,306
|
py
|
Python
|
tests/test_ntree.py
|
awoods/ocfl-py
|
ef4ff9d6b9a950088ff5373c4f1dfeec339f034d
|
[
"MIT"
] | 14
|
2018-09-10T20:08:04.000Z
|
2022-03-29T18:10:43.000Z
|
tests/test_ntree.py
|
awoods/ocfl-py
|
ef4ff9d6b9a950088ff5373c4f1dfeec339f034d
|
[
"MIT"
] | 73
|
2019-02-13T20:35:09.000Z
|
2022-03-24T15:21:34.000Z
|
tests/test_ntree.py
|
awoods/ocfl-py
|
ef4ff9d6b9a950088ff5373c4f1dfeec339f034d
|
[
"MIT"
] | 3
|
2019-02-13T18:39:50.000Z
|
2021-05-04T15:39:04.000Z
|
"""Digest tests."""
import unittest
from ocfl.ntree import Ntree
class TestAll(unittest.TestCase):
    """Tests for Ntree encode/decode and path mapping."""

    def test01_encode(self):
        """Test encode."""
        nt = Ntree()
        self.assertEqual(nt.encode(''), '')
        self.assertEqual(nt.encode('a'), 'a')
        self.assertEqual(nt.encode('a/b:?'), 'a=b+^3f')

    def test02_decode(self):
        """Test decode."""
        nt = Ntree()
        self.assertEqual(nt.decode(''), '')
        self.assertEqual(nt.decode('a'), 'a')
        self.assertEqual(nt.decode('a=b+^3f'), 'a/b:?')

    def test03_identifier_to_path(self):
        """Test path creation."""
        nt = Ntree(n=2, encapsulate=False)
        self.assertEqual(nt.identifier_to_path(''), '')
        self.assertEqual(nt.identifier_to_path('a'), 'a')
        self.assertEqual(nt.identifier_to_path('ab'), 'ab')
        self.assertEqual(nt.identifier_to_path('abc'), 'ab/c')
        self.assertEqual(nt.identifier_to_path('abcde'), 'ab/cd/e')
        nt = Ntree(n=3, encapsulate=False)
        self.assertEqual(nt.identifier_to_path('abcdefg'), 'abc/def/g')
        self.assertEqual(nt.identifier_to_path('abcdefgh'), 'abc/def/gh')
        self.assertEqual(nt.identifier_to_path('abcdefghi'), 'abc/def/ghi')
        nt = Ntree(n=2)
        self.assertEqual(nt.identifier_to_path(''), '')
        self.assertEqual(nt.identifier_to_path('a'), 'a/a')
        self.assertEqual(nt.identifier_to_path('ab'), 'ab/ab')
        self.assertEqual(nt.identifier_to_path('abc'), 'ab/c/abc')
        self.assertEqual(nt.identifier_to_path('abcde'), 'ab/cd/e/abcde')
        nt = Ntree(n=3)
        self.assertEqual(nt.identifier_to_path('abcdefg'), 'abc/def/g/abcdefg')
        self.assertEqual(nt.identifier_to_path('abcdefgh'), 'abc/def/gh/abcdefgh')
        self.assertEqual(nt.identifier_to_path('abcdefghi'), 'abc/def/ghi/abcdefghi')

    # Renamed from a duplicate test03_* prefix so each test number is unique;
    # unittest discovers any method starting with "test", so behavior is unchanged.
    def test04_path_to_identifier(self):
        """Test path interpretation."""
        nt = Ntree(n=2, encapsulate=False)
        self.assertEqual(nt.path_to_identifier(''), '')
        self.assertEqual(nt.path_to_identifier('a'), 'a')
        self.assertEqual(nt.path_to_identifier('ab'), 'ab')
        self.assertEqual(nt.path_to_identifier('ab/c'), 'abc')
        self.assertEqual(nt.path_to_identifier('ab/cd/e'), 'abcde')
        nt = Ntree(n=3, encapsulate=False)
        self.assertEqual(nt.path_to_identifier('abc/def/g'), 'abcdefg')
        self.assertEqual(nt.path_to_identifier('abc/def/gh'), 'abcdefgh')
        self.assertEqual(nt.path_to_identifier('abc/def/ghi'), 'abcdefghi')
        nt = Ntree(n=2)
        self.assertEqual(nt.path_to_identifier(''), '')
        self.assertEqual(nt.path_to_identifier('a/a'), 'a')
        self.assertEqual(nt.path_to_identifier('ab/ab'), 'ab')
        self.assertEqual(nt.path_to_identifier('ab/c/abc'), 'abc')
        self.assertEqual(nt.path_to_identifier('ab/cd/e/abcde'), 'abcde')
        nt = Ntree(n=3)
        self.assertEqual(nt.path_to_identifier('abc/def/g/abcdefg'), 'abcdefg')
        self.assertEqual(nt.path_to_identifier('abc/def/gh/abcdefgh'), 'abcdefgh')
        self.assertEqual(nt.path_to_identifier('abc/def/ghi/abcdefghi'), 'abcdefghi')
        # Bad ones: encapsulation directory must match the identifier.
        self.assertRaises(Exception, nt.path_to_identifier, 'abc/def/g/a-diff-g')
| 47.228571
| 85
| 0.635209
| 443
| 3,306
| 4.573363
| 0.115124
| 0.281343
| 0.318855
| 0.151037
| 0.836624
| 0.764561
| 0.76308
| 0.734452
| 0.720632
| 0.671273
| 0
| 0.006669
| 0.183606
| 3,306
| 69
| 86
| 47.913043
| 0.743979
| 0.037205
| 0
| 0.25
| 0
| 0
| 0.141497
| 0.013325
| 0
| 0
| 0
| 0
| 0.696429
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
cd84b7a7724f16025ed88a33ed95eeab88e7f3db
| 116,140
|
py
|
Python
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 15
|
2020-08-27T04:10:26.000Z
|
2022-03-07T06:25:42.000Z
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 1
|
2020-09-27T01:30:46.000Z
|
2021-12-29T09:15:34.000Z
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 5
|
2020-08-27T04:07:44.000Z
|
2021-12-03T02:55:20.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.exclusive_1_0 import models as dingtalkexclusive__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
if UtilClient.empty(self._endpoint):
self._endpoint = 'api.dingtalk.com'
def get_conference_detail(
self,
conference_id: str,
) -> dingtalkexclusive__1__0_models.GetConferenceDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetConferenceDetailHeaders()
return self.get_conference_detail_with_options(conference_id, headers, runtime)
async def get_conference_detail_async(
self,
conference_id: str,
) -> dingtalkexclusive__1__0_models.GetConferenceDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetConferenceDetailHeaders()
return await self.get_conference_detail_with_options_async(conference_id, headers, runtime)
def get_conference_detail_with_options(
self,
conference_id: str,
headers: dingtalkexclusive__1__0_models.GetConferenceDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetConferenceDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetConferenceDetailResponse(),
self.do_roarequest('GetConferenceDetail', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/conferences/{conference_id}', 'json', req, runtime)
)
async def get_conference_detail_with_options_async(
self,
conference_id: str,
headers: dingtalkexclusive__1__0_models.GetConferenceDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetConferenceDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetConferenceDetailResponse(),
await self.do_roarequest_async('GetConferenceDetail', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/conferences/{conference_id}', 'json', req, runtime)
)
def get_user_app_version_summary(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetUserAppVersionSummaryHeaders()
return self.get_user_app_version_summary_with_options(data_id, request, headers, runtime)
async def get_user_app_version_summary_async(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetUserAppVersionSummaryHeaders()
return await self.get_user_app_version_summary_with_options_async(data_id, request, headers, runtime)
def get_user_app_version_summary_with_options(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryRequest,
headers: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse(),
self.do_roarequest('GetUserAppVersionSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/appVersion/org/{data_id}', 'json', req, runtime)
)
async def get_user_app_version_summary_with_options_async(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryRequest,
headers: dingtalkexclusive__1__0_models.GetUserAppVersionSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetUserAppVersionSummaryResponse(),
await self.do_roarequest_async('GetUserAppVersionSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/appVersion/org/{data_id}', 'json', req, runtime)
)
def delete_comment(
self,
publisher_id: str,
comment_id: str,
) -> dingtalkexclusive__1__0_models.DeleteCommentResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.DeleteCommentHeaders()
return self.delete_comment_with_options(publisher_id, comment_id, headers, runtime)
async def delete_comment_async(
self,
publisher_id: str,
comment_id: str,
) -> dingtalkexclusive__1__0_models.DeleteCommentResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.DeleteCommentHeaders()
return await self.delete_comment_with_options_async(publisher_id, comment_id, headers, runtime)
def delete_comment_with_options(
self,
publisher_id: str,
comment_id: str,
headers: dingtalkexclusive__1__0_models.DeleteCommentHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.DeleteCommentResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.DeleteCommentResponse(),
self.do_roarequest('DeleteComment', 'exclusive_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/exclusive/publishers/{publisher_id}/comments/{comment_id}', 'boolean', req, runtime)
)
async def delete_comment_with_options_async(
self,
publisher_id: str,
comment_id: str,
headers: dingtalkexclusive__1__0_models.DeleteCommentHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.DeleteCommentResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.DeleteCommentResponse(),
await self.do_roarequest_async('DeleteComment', 'exclusive_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/exclusive/publishers/{publisher_id}/comments/{comment_id}', 'boolean', req, runtime)
)
def list_mini_app_history_version(
self,
request: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionRequest,
) -> dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionHeaders()
return self.list_mini_app_history_version_with_options(request, headers, runtime)
async def list_mini_app_history_version_async(
self,
request: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionRequest,
) -> dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionHeaders()
return await self.list_mini_app_history_version_with_options_async(request, headers, runtime)
def list_mini_app_history_version_with_options(
self,
request: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionRequest,
headers: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.mini_app_id):
query['miniAppId'] = request.mini_app_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse(),
self.do_roarequest('ListMiniAppHistoryVersion', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/miniApps/versions/historyLists', 'json', req, runtime)
)
async def list_mini_app_history_version_with_options_async(
self,
request: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionRequest,
headers: dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.mini_app_id):
query['miniAppId'] = request.mini_app_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.ListMiniAppHistoryVersionResponse(),
await self.do_roarequest_async('ListMiniAppHistoryVersion', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/miniApps/versions/historyLists', 'json', req, runtime)
)
def get_doc_created_dept_summary(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryHeaders()
return self.get_doc_created_dept_summary_with_options(data_id, request, headers, runtime)
async def get_doc_created_dept_summary_async(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryHeaders()
return await self.get_doc_created_dept_summary_with_options_async(data_id, request, headers, runtime)
def get_doc_created_dept_summary_with_options(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryRequest,
headers: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse(),
self.do_roarequest('GetDocCreatedDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/doc/dept/{data_id}', 'json', req, runtime)
)
async def get_doc_created_dept_summary_with_options_async(
self,
data_id: str,
request: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryRequest,
headers: dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetDocCreatedDeptSummaryResponse(),
await self.do_roarequest_async('GetDocCreatedDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/doc/dept/{data_id}', 'json', req, runtime)
)
def create_trusted_device(
self,
request: dingtalkexclusive__1__0_models.CreateTrustedDeviceRequest,
) -> dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.CreateTrustedDeviceHeaders()
return self.create_trusted_device_with_options(request, headers, runtime)
async def create_trusted_device_async(
self,
request: dingtalkexclusive__1__0_models.CreateTrustedDeviceRequest,
) -> dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.CreateTrustedDeviceHeaders()
return await self.create_trusted_device_with_options_async(request, headers, runtime)
def create_trusted_device_with_options(
self,
request: dingtalkexclusive__1__0_models.CreateTrustedDeviceRequest,
headers: dingtalkexclusive__1__0_models.CreateTrustedDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.platform):
body['platform'] = request.platform
if not UtilClient.is_unset(request.mac_address):
body['macAddress'] = request.mac_address
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse(),
self.do_roarequest('CreateTrustedDevice', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/trustedDevices', 'json', req, runtime)
)
async def create_trusted_device_with_options_async(
self,
request: dingtalkexclusive__1__0_models.CreateTrustedDeviceRequest,
headers: dingtalkexclusive__1__0_models.CreateTrustedDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.platform):
body['platform'] = request.platform
if not UtilClient.is_unset(request.mac_address):
body['macAddress'] = request.mac_address
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.CreateTrustedDeviceResponse(),
await self.do_roarequest_async('CreateTrustedDevice', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/trustedDevices', 'json', req, runtime)
)
def get_partner_type_by_parent_id(
self,
parent_id: str,
) -> dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdHeaders()
return self.get_partner_type_by_parent_id_with_options(parent_id, headers, runtime)
async def get_partner_type_by_parent_id_async(
self,
parent_id: str,
) -> dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdHeaders()
return await self.get_partner_type_by_parent_id_with_options_async(parent_id, headers, runtime)
def get_partner_type_by_parent_id_with_options(
self,
parent_id: str,
headers: dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse(),
self.do_roarequest('GetPartnerTypeByParentId', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/partnerLabels/{parent_id}', 'json', req, runtime)
)
async def get_partner_type_by_parent_id_with_options_async(
self,
parent_id: str,
headers: dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetPartnerTypeByParentIdResponse(),
await self.do_roarequest_async('GetPartnerTypeByParentId', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/partnerLabels/{parent_id}', 'json', req, runtime)
)
def set_dept_partner_type_and_num(
self,
request: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumRequest,
) -> dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumHeaders()
return self.set_dept_partner_type_and_num_with_options(request, headers, runtime)
async def set_dept_partner_type_and_num_async(
self,
request: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumRequest,
) -> dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumHeaders()
return await self.set_dept_partner_type_and_num_with_options_async(request, headers, runtime)
def set_dept_partner_type_and_num_with_options(
self,
request: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumRequest,
headers: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.dept_id):
body['deptId'] = request.dept_id
if not UtilClient.is_unset(request.partner_num):
body['partnerNum'] = request.partner_num
if not UtilClient.is_unset(request.label_ids):
body['labelIds'] = request.label_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse(),
self.do_roarequest('SetDeptPartnerTypeAndNum', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/partnerDepartments', 'none', req, runtime)
)
async def set_dept_partner_type_and_num_with_options_async(
self,
request: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumRequest,
headers: dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.dept_id):
body['deptId'] = request.dept_id
if not UtilClient.is_unset(request.partner_num):
body['partnerNum'] = request.partner_num
if not UtilClient.is_unset(request.label_ids):
body['labelIds'] = request.label_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.SetDeptPartnerTypeAndNumResponse(),
await self.do_roarequest_async('SetDeptPartnerTypeAndNum', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/partnerDepartments', 'none', req, runtime)
)
def get_active_user_summary(
self,
data_id: str,
) -> dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetActiveUserSummaryHeaders()
return self.get_active_user_summary_with_options(data_id, headers, runtime)
async def get_active_user_summary_async(
self,
data_id: str,
) -> dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetActiveUserSummaryHeaders()
return await self.get_active_user_summary_with_options_async(data_id, headers, runtime)
def get_active_user_summary_with_options(
self,
data_id: str,
headers: dingtalkexclusive__1__0_models.GetActiveUserSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse(),
self.do_roarequest('GetActiveUserSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/dau/org/{data_id}', 'json', req, runtime)
)
async def get_active_user_summary_with_options_async(
self,
data_id: str,
headers: dingtalkexclusive__1__0_models.GetActiveUserSummaryHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetActiveUserSummaryResponse(),
await self.do_roarequest_async('GetActiveUserSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/dau/org/{data_id}', 'json', req, runtime)
)
def get_oa_operator_log_list(
self,
request: dingtalkexclusive__1__0_models.GetOaOperatorLogListRequest,
) -> dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetOaOperatorLogListHeaders()
return self.get_oa_operator_log_list_with_options(request, headers, runtime)
async def get_oa_operator_log_list_async(
self,
request: dingtalkexclusive__1__0_models.GetOaOperatorLogListRequest,
) -> dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.GetOaOperatorLogListHeaders()
return await self.get_oa_operator_log_list_with_options_async(request, headers, runtime)
def get_oa_operator_log_list_with_options(
self,
request: dingtalkexclusive__1__0_models.GetOaOperatorLogListRequest,
headers: dingtalkexclusive__1__0_models.GetOaOperatorLogListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.op_user_id):
body['opUserId'] = request.op_user_id
if not UtilClient.is_unset(request.start_time):
body['startTime'] = request.start_time
if not UtilClient.is_unset(request.end_time):
body['endTime'] = request.end_time
if not UtilClient.is_unset(request.page_number):
body['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.category_list):
body['categoryList'] = request.category_list
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse(),
self.do_roarequest('GetOaOperatorLogList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/oaOperatorLogs/query', 'json', req, runtime)
)
async def get_oa_operator_log_list_with_options_async(
self,
request: dingtalkexclusive__1__0_models.GetOaOperatorLogListRequest,
headers: dingtalkexclusive__1__0_models.GetOaOperatorLogListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.op_user_id):
body['opUserId'] = request.op_user_id
if not UtilClient.is_unset(request.start_time):
body['startTime'] = request.start_time
if not UtilClient.is_unset(request.end_time):
body['endTime'] = request.end_time
if not UtilClient.is_unset(request.page_number):
body['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
body['pageSize'] = request.page_size
if not UtilClient.is_unset(request.category_list):
body['categoryList'] = request.category_list
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.GetOaOperatorLogListResponse(),
await self.do_roarequest_async('GetOaOperatorLogList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/oaOperatorLogs/query', 'json', req, runtime)
)
def rollback_mini_app_version(
self,
request: dingtalkexclusive__1__0_models.RollbackMiniAppVersionRequest,
) -> dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.RollbackMiniAppVersionHeaders()
return self.rollback_mini_app_version_with_options(request, headers, runtime)
async def rollback_mini_app_version_async(
self,
request: dingtalkexclusive__1__0_models.RollbackMiniAppVersionRequest,
) -> dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.RollbackMiniAppVersionHeaders()
return await self.rollback_mini_app_version_with_options_async(request, headers, runtime)
def rollback_mini_app_version_with_options(
self,
request: dingtalkexclusive__1__0_models.RollbackMiniAppVersionRequest,
headers: dingtalkexclusive__1__0_models.RollbackMiniAppVersionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_isv_org_id):
body['dingIsvOrgId'] = request.ding_isv_org_id
if not UtilClient.is_unset(request.ding_org_id):
body['dingOrgId'] = request.ding_org_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_client_id):
body['dingClientId'] = request.ding_client_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
if not UtilClient.is_unset(request.rollback_version):
body['rollbackVersion'] = request.rollback_version
if not UtilClient.is_unset(request.target_version):
body['targetVersion'] = request.target_version
if not UtilClient.is_unset(request.mini_app_id):
body['miniAppId'] = request.mini_app_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse(),
self.do_roarequest('RollbackMiniAppVersion', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/rollback', 'json', req, runtime)
)
async def rollback_mini_app_version_with_options_async(
self,
request: dingtalkexclusive__1__0_models.RollbackMiniAppVersionRequest,
headers: dingtalkexclusive__1__0_models.RollbackMiniAppVersionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_isv_org_id):
body['dingIsvOrgId'] = request.ding_isv_org_id
if not UtilClient.is_unset(request.ding_org_id):
body['dingOrgId'] = request.ding_org_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_client_id):
body['dingClientId'] = request.ding_client_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
if not UtilClient.is_unset(request.rollback_version):
body['rollbackVersion'] = request.rollback_version
if not UtilClient.is_unset(request.target_version):
body['targetVersion'] = request.target_version
if not UtilClient.is_unset(request.mini_app_id):
body['miniAppId'] = request.mini_app_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.RollbackMiniAppVersionResponse(),
await self.do_roarequest_async('RollbackMiniAppVersion', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/rollback', 'json', req, runtime)
)
def publish_file_change_notice(
self,
request: dingtalkexclusive__1__0_models.PublishFileChangeNoticeRequest,
) -> dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.PublishFileChangeNoticeHeaders()
return self.publish_file_change_notice_with_options(request, headers, runtime)
async def publish_file_change_notice_async(
self,
request: dingtalkexclusive__1__0_models.PublishFileChangeNoticeRequest,
) -> dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkexclusive__1__0_models.PublishFileChangeNoticeHeaders()
return await self.publish_file_change_notice_with_options_async(request, headers, runtime)
def publish_file_change_notice_with_options(
self,
request: dingtalkexclusive__1__0_models.PublishFileChangeNoticeRequest,
headers: dingtalkexclusive__1__0_models.PublishFileChangeNoticeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.file_id):
body['fileId'] = request.file_id
if not UtilClient.is_unset(request.space_id):
body['spaceId'] = request.space_id
if not UtilClient.is_unset(request.operator_union_id):
body['operatorUnionId'] = request.operator_union_id
if not UtilClient.is_unset(request.operate_type):
body['operateType'] = request.operate_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse(),
self.do_roarequest('PublishFileChangeNotice', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/comments/send', 'none', req, runtime)
)
async def publish_file_change_notice_with_options_async(
self,
request: dingtalkexclusive__1__0_models.PublishFileChangeNoticeRequest,
headers: dingtalkexclusive__1__0_models.PublishFileChangeNoticeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.file_id):
body['fileId'] = request.file_id
if not UtilClient.is_unset(request.space_id):
body['spaceId'] = request.space_id
if not UtilClient.is_unset(request.operator_union_id):
body['operatorUnionId'] = request.operator_union_id
if not UtilClient.is_unset(request.operate_type):
body['operateType'] = request.operate_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkexclusive__1__0_models.PublishFileChangeNoticeResponse(),
await self.do_roarequest_async('PublishFileChangeNotice', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/comments/send', 'none', req, runtime)
)
def get_general_form_created_dept_summary(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse:
    """Convenience wrapper: fetch per-department general-form summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_general_form_created_dept_summary_with_options(data_id, request, default_headers, default_runtime)
async def get_general_form_created_dept_summary_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse:
    """Async convenience wrapper: per-department general-form summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_general_form_created_dept_summary_with_options_async(data_id, request, default_headers, default_runtime)
def get_general_form_created_dept_summary_with_options(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse:
    """GET the per-department general-form summary; pagination via nextToken/maxResults."""
    UtilClient.validate_model(request)
    # Only forward pagination parameters that the caller set.
    query = {}
    for key, value in (
        ('nextToken', request.next_token),
        ('maxResults', request.max_results),
    ):
        if not UtilClient.is_unset(value):
            query[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        query=OpenApiUtilClient.query(query)
    )
    raw_response = self.do_roarequest('GetGeneralFormCreatedDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/generalForm/dept/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse(),
        raw_response
    )
async def get_general_form_created_dept_summary_with_options_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse:
    """Async GET of the per-department general-form summary; pagination via nextToken/maxResults."""
    UtilClient.validate_model(request)
    # Only forward pagination parameters that the caller set.
    query = {}
    for key, value in (
        ('nextToken', request.next_token),
        ('maxResults', request.max_results),
    ):
        if not UtilClient.is_unset(value):
            query[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        query=OpenApiUtilClient.query(query)
    )
    raw_response = await self.do_roarequest_async('GetGeneralFormCreatedDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/generalForm/dept/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGeneralFormCreatedDeptSummaryResponse(),
        raw_response
    )
def get_calender_summary(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetCalenderSummaryResponse:
    """Convenience wrapper: fetch the org calendar summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetCalenderSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_calender_summary_with_options(data_id, default_headers, default_runtime)
async def get_calender_summary_async(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetCalenderSummaryResponse:
    """Async convenience wrapper: fetch the org calendar summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetCalenderSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_calender_summary_with_options_async(data_id, default_headers, default_runtime)
def get_calender_summary_with_options(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetCalenderSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetCalenderSummaryResponse:
    """GET the org calendar summary for *data_id* with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = self.do_roarequest('GetCalenderSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/calendar/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetCalenderSummaryResponse(),
        raw_response
    )
async def get_calender_summary_with_options_async(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetCalenderSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetCalenderSummaryResponse:
    """Async GET of the org calendar summary for *data_id* with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = await self.do_roarequest_async('GetCalenderSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/calendar/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetCalenderSummaryResponse(),
        raw_response
    )
def get_all_labelable_depts(self) -> dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse:
    """Convenience wrapper: list labelable partner departments with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetAllLabelableDeptsHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_all_labelable_depts_with_options(default_headers, default_runtime)
async def get_all_labelable_depts_async(self) -> dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse:
    """Async convenience wrapper: list labelable partner departments with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetAllLabelableDeptsHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_all_labelable_depts_with_options_async(default_headers, default_runtime)
def get_all_labelable_depts_with_options(
    self,
    headers: dingtalkexclusive__1__0_models.GetAllLabelableDeptsHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse:
    """GET all labelable partner departments with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = self.do_roarequest('GetAllLabelableDepts', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/partnerDepartments', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse(),
        raw_response
    )
async def get_all_labelable_depts_with_options_async(
    self,
    headers: dingtalkexclusive__1__0_models.GetAllLabelableDeptsHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse:
    """Async GET of all labelable partner departments with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = await self.do_roarequest_async('GetAllLabelableDepts', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/partnerDepartments', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetAllLabelableDeptsResponse(),
        raw_response
    )
def get_publisher_summary(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetPublisherSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetPublisherSummaryResponse:
    """Convenience wrapper: fetch the publisher summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetPublisherSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_publisher_summary_with_options(data_id, request, default_headers, default_runtime)
async def get_publisher_summary_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetPublisherSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetPublisherSummaryResponse:
    """Async convenience wrapper: fetch the publisher summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetPublisherSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_publisher_summary_with_options_async(data_id, request, default_headers, default_runtime)
def get_publisher_summary_with_options(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetPublisherSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetPublisherSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetPublisherSummaryResponse:
    """GET the publisher summary for *data_id*; pagination via nextToken/maxResults."""
    UtilClient.validate_model(request)
    # Only forward pagination parameters that the caller set.
    query = {}
    for key, value in (
        ('nextToken', request.next_token),
        ('maxResults', request.max_results),
    ):
        if not UtilClient.is_unset(value):
            query[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        query=OpenApiUtilClient.query(query)
    )
    raw_response = self.do_roarequest('GetPublisherSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/publisher/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetPublisherSummaryResponse(),
        raw_response
    )
async def get_publisher_summary_with_options_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetPublisherSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetPublisherSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetPublisherSummaryResponse:
    """Async GET of the publisher summary for *data_id*; pagination via nextToken/maxResults."""
    UtilClient.validate_model(request)
    # Only forward pagination parameters that the caller set.
    query = {}
    for key, value in (
        ('nextToken', request.next_token),
        ('maxResults', request.max_results),
    ):
        if not UtilClient.is_unset(value):
            query[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        query=OpenApiUtilClient.query(query)
    )
    raw_response = await self.do_roarequest_async('GetPublisherSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/publisher/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetPublisherSummaryResponse(),
        raw_response
    )
def update_mini_app_version_status(
    self,
    request: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusRequest,
) -> dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse:
    """Convenience wrapper: update a mini-app version status with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.update_mini_app_version_status_with_options(request, default_headers, default_runtime)
async def update_mini_app_version_status_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusRequest,
) -> dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse:
    """Async convenience wrapper: update a mini-app version status with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.update_mini_app_version_status_with_options_async(request, default_headers, default_runtime)
def update_mini_app_version_status_with_options(
    self,
    request: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusRequest,
    headers: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse:
    """POST a mini-app version status change with explicit headers/runtime."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('versionType', request.version_type),
        ('version', request.version),
        ('miniAppId', request.mini_app_id),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = self.do_roarequest('UpdateMiniAppVersionStatus', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/versionStatus', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse(),
        raw_response
    )
async def update_mini_app_version_status_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusRequest,
    headers: dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse:
    """Async POST of a mini-app version status change with explicit headers/runtime."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('versionType', request.version_type),
        ('version', request.version),
        ('miniAppId', request.mini_app_id),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = await self.do_roarequest_async('UpdateMiniAppVersionStatus', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/versionStatus', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdateMiniAppVersionStatusResponse(),
        raw_response
    )
def update_role_visibility(
    self,
    request: dingtalkexclusive__1__0_models.UpdateRoleVisibilityRequest,
) -> dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse:
    """Convenience wrapper: update role visibility with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdateRoleVisibilityHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.update_role_visibility_with_options(request, default_headers, default_runtime)
async def update_role_visibility_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdateRoleVisibilityRequest,
) -> dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse:
    """Async convenience wrapper: update role visibility with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdateRoleVisibilityHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.update_role_visibility_with_options_async(request, default_headers, default_runtime)
def update_role_visibility_with_options(
    self,
    request: dingtalkexclusive__1__0_models.UpdateRoleVisibilityRequest,
    headers: dingtalkexclusive__1__0_models.UpdateRoleVisibilityHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse:
    """PUT the role-visibility settings for partner departments with explicit headers/runtime."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('labelId', request.label_id),
        ('deptIds', request.dept_ids),
        ('userIds', request.user_ids),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = self.do_roarequest('UpdateRoleVisibility', 'exclusive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/exclusive/partnerDepartments/visibilityRoles', 'boolean', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse(),
        raw_response
    )
async def update_role_visibility_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdateRoleVisibilityRequest,
    headers: dingtalkexclusive__1__0_models.UpdateRoleVisibilityHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse:
    """Async PUT of the role-visibility settings for partner departments."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('labelId', request.label_id),
        ('deptIds', request.dept_ids),
        ('userIds', request.user_ids),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = await self.do_roarequest_async('UpdateRoleVisibility', 'exclusive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/exclusive/partnerDepartments/visibilityRoles', 'boolean', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdateRoleVisibilityResponse(),
        raw_response
    )
def get_general_form_created_summary(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse:
    """Convenience wrapper: fetch the org-wide general-form summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_general_form_created_summary_with_options(data_id, default_headers, default_runtime)
async def get_general_form_created_summary_async(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse:
    """Async convenience wrapper: org-wide general-form summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_general_form_created_summary_with_options_async(data_id, default_headers, default_runtime)
def get_general_form_created_summary_with_options(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse:
    """GET the org-wide general-form summary for *data_id* with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = self.do_roarequest('GetGeneralFormCreatedSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/generalForm/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse(),
        raw_response
    )
async def get_general_form_created_summary_with_options_async(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse:
    """Async GET of the org-wide general-form summary for *data_id*."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = await self.do_roarequest_async('GetGeneralFormCreatedSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/generalForm/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGeneralFormCreatedSummaryResponse(),
        raw_response
    )
def get_doc_created_summary(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse:
    """Convenience wrapper: fetch the org document-creation summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetDocCreatedSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_doc_created_summary_with_options(data_id, default_headers, default_runtime)
async def get_doc_created_summary_async(
    self,
    data_id: str,
) -> dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse:
    """Async convenience wrapper: org document-creation summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetDocCreatedSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_doc_created_summary_with_options_async(data_id, default_headers, default_runtime)
def get_doc_created_summary_with_options(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetDocCreatedSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse:
    """GET the org document-creation summary for *data_id* with explicit headers/runtime."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = self.do_roarequest('GetDocCreatedSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/doc/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse(),
        raw_response
    )
async def get_doc_created_summary_with_options_async(
    self,
    data_id: str,
    headers: dingtalkexclusive__1__0_models.GetDocCreatedSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse:
    """Async GET of the org document-creation summary for *data_id*."""
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers
    )
    raw_response = await self.do_roarequest_async('GetDocCreatedSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/doc/org/{data_id}', 'json', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetDocCreatedSummaryResponse(),
        raw_response
    )
def send_app_ding(
    self,
    request: dingtalkexclusive__1__0_models.SendAppDingRequest,
) -> dingtalkexclusive__1__0_models.SendAppDingResponse:
    """Convenience wrapper: send an app DING notification with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.SendAppDingHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.send_app_ding_with_options(request, default_headers, default_runtime)
async def send_app_ding_async(
    self,
    request: dingtalkexclusive__1__0_models.SendAppDingRequest,
) -> dingtalkexclusive__1__0_models.SendAppDingResponse:
    """Async convenience wrapper: send an app DING notification with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.SendAppDingHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.send_app_ding_with_options_async(request, default_headers, default_runtime)
def send_app_ding_with_options(
    self,
    request: dingtalkexclusive__1__0_models.SendAppDingRequest,
    headers: dingtalkexclusive__1__0_models.SendAppDingHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SendAppDingResponse:
    """POST an app DING (userids + content) with explicit headers/runtime; no response payload."""
    UtilClient.validate_model(request)
    # Only include fields the caller actually set.
    body = {}
    for key, value in (
        ('userids', request.userids),
        ('content', request.content),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = self.do_roarequest('SendAppDing', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/appDings/send', 'none', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SendAppDingResponse(),
        raw_response
    )
async def send_app_ding_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.SendAppDingRequest,
    headers: dingtalkexclusive__1__0_models.SendAppDingHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SendAppDingResponse:
    """Async POST of an app DING (userids + content); no response payload."""
    UtilClient.validate_model(request)
    # Only include fields the caller actually set.
    body = {}
    for key, value in (
        ('userids', request.userids),
        ('content', request.content),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = await self.do_roarequest_async('SendAppDing', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/appDings/send', 'none', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SendAppDingResponse(),
        raw_response
    )
def update_partner_visibility(
    self,
    request: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityRequest,
) -> dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse:
    """Convenience wrapper: update partner visibility with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdatePartnerVisibilityHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.update_partner_visibility_with_options(request, default_headers, default_runtime)
async def update_partner_visibility_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityRequest,
) -> dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse:
    """Async convenience wrapper: update partner visibility with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.UpdatePartnerVisibilityHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.update_partner_visibility_with_options_async(request, default_headers, default_runtime)
def update_partner_visibility_with_options(
    self,
    request: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityRequest,
    headers: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse:
    """PUT the partner-visibility settings for partner departments with explicit headers/runtime."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('labelId', request.label_id),
        ('deptIds', request.dept_ids),
        ('userIds', request.user_ids),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = self.do_roarequest('UpdatePartnerVisibility', 'exclusive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/exclusive/partnerDepartments/visibilityPartners', 'boolean', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse(),
        raw_response
    )
async def update_partner_visibility_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityRequest,
    headers: dingtalkexclusive__1__0_models.UpdatePartnerVisibilityHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse:
    """Async PUT of the partner-visibility settings for partner departments."""
    UtilClient.validate_model(request)
    # Table-driven body build: same key order as the generated original,
    # skipping any field the caller left unset.
    body = {}
    for key, value in (
        ('labelId', request.label_id),
        ('deptIds', request.dept_ids),
        ('userIds', request.user_ids),
        ('dingClientId', request.ding_client_id),
        ('dingTokenGrantType', request.ding_token_grant_type),
        ('dingOrgId', request.ding_org_id),
        ('dingIsvOrgId', request.ding_isv_org_id),
        ('dingSuiteKey', request.ding_suite_key),
        ('dingCorpId', request.ding_corp_id),
    ):
        if not UtilClient.is_unset(value):
            body[key] = value
    merged_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        merged_headers = headers.common_headers
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        merged_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    api_request = open_api_models.OpenApiRequest(
        headers=merged_headers,
        body=OpenApiUtilClient.parse_to_map(body)
    )
    raw_response = await self.do_roarequest_async('UpdatePartnerVisibility', 'exclusive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/exclusive/partnerDepartments/visibilityPartners', 'boolean', api_request, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.UpdatePartnerVisibilityResponse(),
        raw_response
    )
def get_ding_report_dept_summary(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse:
    """Convenience wrapper: fetch the per-department report summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetDingReportDeptSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return self.get_ding_report_dept_summary_with_options(data_id, request, default_headers, default_runtime)
async def get_ding_report_dept_summary_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryRequest,
) -> dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse:
    """Async convenience wrapper: per-department report summary with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetDingReportDeptSummaryHeaders()
    default_runtime = util_models.RuntimeOptions()
    return await self.get_ding_report_dept_summary_with_options_async(data_id, request, default_headers, default_runtime)
def get_ding_report_dept_summary_with_options(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse:
    """GET the department report summary for *data_id*.

    Only request fields that are actually set are forwarded as query
    parameters; custom headers and the access token are merged in.
    """
    UtilClient.validate_model(request)
    # Forward only the paging parameters the caller populated.
    field_map = {
        'nextToken': request.next_token,
        'maxResults': request.max_results,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = self.do_roarequest('GetDingReportDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/report/dept/{data_id}', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse(),
        raw,
    )
async def get_ding_report_dept_summary_with_options_async(
    self,
    data_id: str,
    request: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryRequest,
    headers: dingtalkexclusive__1__0_models.GetDingReportDeptSummaryHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse:
    """Async GET of the department report summary for *data_id*."""
    UtilClient.validate_model(request)
    # Forward only the paging parameters the caller populated.
    field_map = {
        'nextToken': request.next_token,
        'maxResults': request.max_results,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = await self.do_roarequest_async('GetDingReportDeptSummary', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/report/dept/{data_id}', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetDingReportDeptSummaryResponse(),
        raw,
    )
def get_in_active_user_list(
    self,
    request: dingtalkexclusive__1__0_models.GetInActiveUserListRequest,
) -> dingtalkexclusive__1__0_models.GetInActiveUserListResponse:
    """Query inactive users using default headers and runtime options."""
    default_headers = dingtalkexclusive__1__0_models.GetInActiveUserListHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.get_in_active_user_list_with_options(request, default_headers, run_opts)
async def get_in_active_user_list_async(
    self,
    request: dingtalkexclusive__1__0_models.GetInActiveUserListRequest,
) -> dingtalkexclusive__1__0_models.GetInActiveUserListResponse:
    """Async variant: query inactive users with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetInActiveUserListHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.get_in_active_user_list_with_options_async(request, default_headers, run_opts)
def get_in_active_user_list_with_options(
    self,
    request: dingtalkexclusive__1__0_models.GetInActiveUserListRequest,
    headers: dingtalkexclusive__1__0_models.GetInActiveUserListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetInActiveUserListResponse:
    """POST a query for inactive users.

    Set request fields are serialized into the body; custom headers and
    the access token are merged into the request headers.
    """
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'statDate': request.stat_date,
        'serviceId': request.service_id,
        'dingOauthAppId': request.ding_oauth_app_id,
        'dingOrgId': request.ding_org_id,
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
        'deptIds': request.dept_ids,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = self.do_roarequest('GetInActiveUserList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/inactives/users/query', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetInActiveUserListResponse(),
        raw,
    )
async def get_in_active_user_list_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.GetInActiveUserListRequest,
    headers: dingtalkexclusive__1__0_models.GetInActiveUserListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetInActiveUserListResponse:
    """Async POST of an inactive-users query."""
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'statDate': request.stat_date,
        'serviceId': request.service_id,
        'dingOauthAppId': request.ding_oauth_app_id,
        'dingOrgId': request.ding_org_id,
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
        'deptIds': request.dept_ids,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = await self.do_roarequest_async('GetInActiveUserList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/inactives/users/query', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetInActiveUserListResponse(),
        raw,
    )
def get_trust_device_list(
    self,
    request: dingtalkexclusive__1__0_models.GetTrustDeviceListRequest,
) -> dingtalkexclusive__1__0_models.GetTrustDeviceListResponse:
    """Query trusted devices using default headers and runtime options."""
    default_headers = dingtalkexclusive__1__0_models.GetTrustDeviceListHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.get_trust_device_list_with_options(request, default_headers, run_opts)
async def get_trust_device_list_async(
    self,
    request: dingtalkexclusive__1__0_models.GetTrustDeviceListRequest,
) -> dingtalkexclusive__1__0_models.GetTrustDeviceListResponse:
    """Async variant: query trusted devices with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetTrustDeviceListHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.get_trust_device_list_with_options_async(request, default_headers, run_opts)
def get_trust_device_list_with_options(
    self,
    request: dingtalkexclusive__1__0_models.GetTrustDeviceListRequest,
    headers: dingtalkexclusive__1__0_models.GetTrustDeviceListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetTrustDeviceListResponse:
    """POST a trusted-device query for the given user ids."""
    UtilClient.validate_model(request)
    # Only include userIds when the caller actually set it.
    field_map = {'userIds': request.user_ids}
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = self.do_roarequest('GetTrustDeviceList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/trustedDevices/query', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetTrustDeviceListResponse(),
        raw,
    )
async def get_trust_device_list_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.GetTrustDeviceListRequest,
    headers: dingtalkexclusive__1__0_models.GetTrustDeviceListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetTrustDeviceListResponse:
    """Async POST of a trusted-device query."""
    UtilClient.validate_model(request)
    # Only include userIds when the caller actually set it.
    field_map = {'userIds': request.user_ids}
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = await self.do_roarequest_async('GetTrustDeviceList', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/trustedDevices/query', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetTrustDeviceListResponse(),
        raw,
    )
def list_mini_app_available_version(
    self,
    request: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionRequest,
) -> dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse:
    """List available mini-app versions with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.list_mini_app_available_version_with_options(request, default_headers, run_opts)
async def list_mini_app_available_version_async(
    self,
    request: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionRequest,
) -> dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse:
    """Async variant: list available mini-app versions with defaults."""
    default_headers = dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.list_mini_app_available_version_with_options_async(request, default_headers, run_opts)
def list_mini_app_available_version_with_options(
    self,
    request: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionRequest,
    headers: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse:
    """POST a query for a mini-app's available version list.

    Set request fields are serialized into the body; custom headers and
    the access token are merged into the request headers.
    """
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'versionTypeSet': request.version_type_set,
        'pageSize': request.page_size,
        'pageNumber': request.page_number,
        'dingIsvOrgId': request.ding_isv_org_id,
        'dingOrgId': request.ding_org_id,
        'dingSuiteKey': request.ding_suite_key,
        'dingCorpId': request.ding_corp_id,
        'dingClientId': request.ding_client_id,
        'dingTokenGrantType': request.ding_token_grant_type,
        'miniAppId': request.mini_app_id,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = self.do_roarequest('ListMiniAppAvailableVersion', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/availableLists', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse(),
        raw,
    )
async def list_mini_app_available_version_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionRequest,
    headers: dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse:
    """Async POST of a mini-app available-version query."""
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'versionTypeSet': request.version_type_set,
        'pageSize': request.page_size,
        'pageNumber': request.page_number,
        'dingIsvOrgId': request.ding_isv_org_id,
        'dingOrgId': request.ding_org_id,
        'dingSuiteKey': request.ding_suite_key,
        'dingCorpId': request.ding_corp_id,
        'dingClientId': request.ding_client_id,
        'dingTokenGrantType': request.ding_token_grant_type,
        'miniAppId': request.mini_app_id,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = await self.do_roarequest_async('ListMiniAppAvailableVersion', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/miniApps/versions/availableLists', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.ListMiniAppAvailableVersionResponse(),
        raw,
    )
def search_org_inner_group_info(
    self,
    request: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoRequest,
) -> dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse:
    """Search org-internal group info using default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.search_org_inner_group_info_with_options(request, default_headers, run_opts)
async def search_org_inner_group_info_async(
    self,
    request: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoRequest,
) -> dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse:
    """Async variant: search org-internal group info with defaults."""
    default_headers = dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.search_org_inner_group_info_with_options_async(request, default_headers, run_opts)
def search_org_inner_group_info_with_options(
    self,
    request: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoRequest,
    headers: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse:
    """GET org-internal group info matching the request's filters.

    Set request fields become query parameters; custom headers and the
    access token are merged into the request headers.
    """
    UtilClient.validate_model(request)
    # Forward only the filter fields the caller populated.
    field_map = {
        'groupMembersCountEnd': request.group_members_count_end,
        'syncToDingpan': request.sync_to_dingpan,
        'groupOwner': request.group_owner,
        'createTimeEnd': request.create_time_end,
        'pageSize': request.page_size,
        'createTimeStart': request.create_time_start,
        'uuid': request.uuid,
        'groupMembersCountStart': request.group_members_count_start,
        'lastActiveTimeEnd': request.last_active_time_end,
        'operatorUserId': request.operator_user_id,
        'groupName': request.group_name,
        'pageStart': request.page_start,
        'lastActiveTimeStart': request.last_active_time_start,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = self.do_roarequest('SearchOrgInnerGroupInfo', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/securities/orgGroupInfos', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse(),
        raw,
    )
async def search_org_inner_group_info_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoRequest,
    headers: dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse:
    """Async GET of org-internal group info matching the request's filters."""
    UtilClient.validate_model(request)
    # Forward only the filter fields the caller populated.
    field_map = {
        'groupMembersCountEnd': request.group_members_count_end,
        'syncToDingpan': request.sync_to_dingpan,
        'groupOwner': request.group_owner,
        'createTimeEnd': request.create_time_end,
        'pageSize': request.page_size,
        'createTimeStart': request.create_time_start,
        'uuid': request.uuid,
        'groupMembersCountStart': request.group_members_count_start,
        'lastActiveTimeEnd': request.last_active_time_end,
        'operatorUserId': request.operator_user_id,
        'groupName': request.group_name,
        'pageStart': request.page_start,
        'lastActiveTimeStart': request.last_active_time_start,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = await self.do_roarequest_async('SearchOrgInnerGroupInfo', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/securities/orgGroupInfos', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SearchOrgInnerGroupInfoResponse(),
        raw,
    )
def send_invitation(
    self,
    request: dingtalkexclusive__1__0_models.SendInvitationRequest,
) -> dingtalkexclusive__1__0_models.SendInvitationResponse:
    """Send a partner-department invitation with default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.SendInvitationHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.send_invitation_with_options(request, default_headers, run_opts)
async def send_invitation_async(
    self,
    request: dingtalkexclusive__1__0_models.SendInvitationRequest,
) -> dingtalkexclusive__1__0_models.SendInvitationResponse:
    """Async variant: send a partner-department invitation with defaults."""
    default_headers = dingtalkexclusive__1__0_models.SendInvitationHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.send_invitation_with_options_async(request, default_headers, run_opts)
def send_invitation_with_options(
    self,
    request: dingtalkexclusive__1__0_models.SendInvitationRequest,
    headers: dingtalkexclusive__1__0_models.SendInvitationHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SendInvitationResponse:
    """POST a partner-department invitation (endpoint returns no body)."""
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'deptId': request.dept_id,
        'partnerNum': request.partner_num,
        'partnerLabelId': request.partner_label_id,
        'phone': request.phone,
        'orgAlias': request.org_alias,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = self.do_roarequest('SendInvitation', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/partnerDepartments/invitations/send', 'none', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SendInvitationResponse(),
        raw,
    )
async def send_invitation_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.SendInvitationRequest,
    headers: dingtalkexclusive__1__0_models.SendInvitationHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.SendInvitationResponse:
    """Async POST of a partner-department invitation."""
    UtilClient.validate_model(request)
    # Forward only the request fields the caller populated.
    field_map = {
        'deptId': request.dept_id,
        'partnerNum': request.partner_num,
        'partnerLabelId': request.partner_label_id,
        'phone': request.phone,
        'orgAlias': request.org_alias,
    }
    body = {key: value for key, value in field_map.items()
            if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    raw = await self.do_roarequest_async('SendInvitation', 'exclusive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/exclusive/partnerDepartments/invitations/send', 'none', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.SendInvitationResponse(),
        raw,
    )
def get_group_active_info(
    self,
    request: dingtalkexclusive__1__0_models.GetGroupActiveInfoRequest,
) -> dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse:
    """Fetch group activity info using default headers and runtime options."""
    default_headers = dingtalkexclusive__1__0_models.GetGroupActiveInfoHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.get_group_active_info_with_options(request, default_headers, run_opts)
async def get_group_active_info_async(
    self,
    request: dingtalkexclusive__1__0_models.GetGroupActiveInfoRequest,
) -> dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse:
    """Async variant: fetch group activity info with defaults."""
    default_headers = dingtalkexclusive__1__0_models.GetGroupActiveInfoHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.get_group_active_info_with_options_async(request, default_headers, run_opts)
def get_group_active_info_with_options(
    self,
    request: dingtalkexclusive__1__0_models.GetGroupActiveInfoRequest,
    headers: dingtalkexclusive__1__0_models.GetGroupActiveInfoHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse:
    """GET active-group statistics filtered by the request's fields."""
    UtilClient.validate_model(request)
    # Forward only the filter fields the caller populated.
    field_map = {
        'statDate': request.stat_date,
        'dingGroupId': request.ding_group_id,
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = self.do_roarequest('GetGroupActiveInfo', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/activeGroups', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse(),
        raw,
    )
async def get_group_active_info_with_options_async(
    self,
    request: dingtalkexclusive__1__0_models.GetGroupActiveInfoRequest,
    headers: dingtalkexclusive__1__0_models.GetGroupActiveInfoHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse:
    """Async GET of active-group statistics."""
    UtilClient.validate_model(request)
    # Forward only the filter fields the caller populated.
    field_map = {
        'statDate': request.stat_date,
        'dingGroupId': request.ding_group_id,
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = await self.do_roarequest_async('GetGroupActiveInfo', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/data/activeGroups', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetGroupActiveInfoResponse(),
        raw,
    )
def get_comment_list(
    self,
    publisher_id: str,
    request: dingtalkexclusive__1__0_models.GetCommentListRequest,
) -> dingtalkexclusive__1__0_models.GetCommentListResponse:
    """List a publisher's comments using default headers/runtime."""
    default_headers = dingtalkexclusive__1__0_models.GetCommentListHeaders()
    run_opts = util_models.RuntimeOptions()
    return self.get_comment_list_with_options(publisher_id, request, default_headers, run_opts)
async def get_comment_list_async(
    self,
    publisher_id: str,
    request: dingtalkexclusive__1__0_models.GetCommentListRequest,
) -> dingtalkexclusive__1__0_models.GetCommentListResponse:
    """Async variant: list a publisher's comments with defaults."""
    default_headers = dingtalkexclusive__1__0_models.GetCommentListHeaders()
    run_opts = util_models.RuntimeOptions()
    return await self.get_comment_list_with_options_async(publisher_id, request, default_headers, run_opts)
def get_comment_list_with_options(
    self,
    publisher_id: str,
    request: dingtalkexclusive__1__0_models.GetCommentListRequest,
    headers: dingtalkexclusive__1__0_models.GetCommentListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetCommentListResponse:
    """GET the comment list for *publisher_id*, with optional paging."""
    UtilClient.validate_model(request)
    # Forward only the paging parameters the caller populated.
    field_map = {
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = self.do_roarequest('GetCommentList', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/publishers/{publisher_id}/comments/list', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetCommentListResponse(),
        raw,
    )
async def get_comment_list_with_options_async(
    self,
    publisher_id: str,
    request: dingtalkexclusive__1__0_models.GetCommentListRequest,
    headers: dingtalkexclusive__1__0_models.GetCommentListHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkexclusive__1__0_models.GetCommentListResponse:
    """Async GET of the comment list for *publisher_id*."""
    UtilClient.validate_model(request)
    # Forward only the paging parameters the caller populated.
    field_map = {
        'pageNumber': request.page_number,
        'pageSize': request.page_size,
    }
    query = {key: value for key, value in field_map.items()
             if not UtilClient.is_unset(value)}
    real_headers = {}
    if not UtilClient.is_unset(headers.common_headers):
        real_headers = headers.common_headers
    access_token = headers.x_acs_dingtalk_access_token
    if not UtilClient.is_unset(access_token):
        real_headers['x-acs-dingtalk-access-token'] = access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        query=OpenApiUtilClient.query(query),
    )
    raw = await self.do_roarequest_async('GetCommentList', 'exclusive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/exclusive/publishers/{publisher_id}/comments/list', 'json', req, runtime)
    return TeaCore.from_map(
        dingtalkexclusive__1__0_models.GetCommentListResponse(),
        raw,
    )
| 51.917747
| 193
| 0.713105
| 12,518
| 116,140
| 6.186691
| 0.029238
| 0.011621
| 0.095435
| 0.125573
| 0.985073
| 0.969152
| 0.95686
| 0.941313
| 0.919285
| 0.902641
| 0
| 0.011058
| 0.204977
| 116,140
| 2,236
| 194
| 51.940966
| 0.827691
| 0.000689
| 0
| 0.822106
| 1
| 0
| 0.080141
| 0.04207
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028937
| false
| 0
| 0.003321
| 0
| 0.089658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cd84dc75becec4bcf4b7a2f3f2dcd0c4fd101927
| 1,234
|
py
|
Python
|
poke_django/teams/migrations/0002_auto_20201229_1837.py
|
XrossFox/Poke-Django-Api-Test
|
2e272b055fbeb633edb64fb2a3d6a720a50045f8
|
[
"MIT"
] | null | null | null |
poke_django/teams/migrations/0002_auto_20201229_1837.py
|
XrossFox/Poke-Django-Api-Test
|
2e272b055fbeb633edb64fb2a3d6a720a50045f8
|
[
"MIT"
] | null | null | null |
poke_django/teams/migrations/0002_auto_20201229_1837.py
|
XrossFox/Poke-Django-Api-Test
|
2e272b055fbeb633edb64fb2a3d6a720a50045f8
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-30 00:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make every team slot's dex-id column optional (nullable and blankable)."""

    dependencies = [
        ('teams', '0001_initial'),
    ]

    # All six slot columns receive the identical AlterField change, so
    # generate the operations from the literal field names.
    operations = [
        migrations.AlterField(
            model_name='team',
            name=field_name,
            field=models.IntegerField(blank=True, null=True),
        )
        for field_name in (
            'slot_1_dex_id',
            'slot_2_dex_id',
            'slot_3_dex_id',
            'slot_4_dex_id',
            'slot_5_dex_id',
            'slot_6_dex_id',
        )
    ]
| 28.045455
| 61
| 0.555916
| 129
| 1,234
| 5.124031
| 0.302326
| 0.181543
| 0.226929
| 0.263238
| 0.780635
| 0.780635
| 0.780635
| 0.718608
| 0.718608
| 0.65053
| 0
| 0.030048
| 0.32577
| 1,234
| 43
| 62
| 28.697674
| 0.764423
| 0.036467
| 0
| 0.648649
| 1
| 0
| 0.100253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
26955a95196dde9594c595f3ca163135bd65e6e2
| 10,176
|
py
|
Python
|
trash-classify/models/model1.py
|
scp19801980/Trash-classify-M_b-Xception
|
1cfb0bc111a356770fcade17bc8b83e13c4838f5
|
[
"MIT"
] | 5
|
2020-08-08T11:38:54.000Z
|
2021-11-04T02:15:06.000Z
|
trash-classify/models/model1.py
|
scp19801980/Trash-classify-M_b-Xception
|
1cfb0bc111a356770fcade17bc8b83e13c4838f5
|
[
"MIT"
] | null | null | null |
trash-classify/models/model1.py
|
scp19801980/Trash-classify-M_b-Xception
|
1cfb0bc111a356770fcade17bc8b83e13c4838f5
|
[
"MIT"
] | 1
|
2020-08-08T11:41:00.000Z
|
2020-08-08T11:41:00.000Z
|
#Xception
from keras import layers
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization
from keras.layers import Activation, Input, Dense, SeparableConv2D
from keras.models import Model
from keras.regularizers import l2
def Xception(input_shape, num_classes):
    """Build an Xception-style classifier as a Keras functional model.

    The network follows the original Xception layout: a plain conv stem,
    three down-sampling entry-flow residual blocks (128/256/728 filters),
    eight identical 728-filter middle-flow residual blocks, one
    down-sampling exit-flow block, and a separable-conv head ending in a
    softmax classifier.

    :param input_shape: shape tuple of the input image tensor,
        e.g. ``(229, 229, 3)``.
    :param num_classes: number of output classes for the softmax head.
    :return: an uncompiled ``keras.models.Model``.
    """
    l2_reg = 1e-4  # weight decay applied to every regularized conv kernel

    def _conv_bn(tensor, filters, strides=(1, 1)):
        # 3x3 conv (valid padding, no bias -- BatchNorm supplies the shift)
        # followed by BatchNorm.
        tensor = Conv2D(filters, (3, 3), strides=strides, use_bias=False,
                        kernel_regularizer=l2(l2_reg),
                        kernel_initializer='he_normal')(tensor)
        return BatchNormalization()(tensor)

    def _sep_bn(tensor, filters):
        # 3x3 separable conv ('same' padding, no bias) followed by BatchNorm.
        tensor = SeparableConv2D(filters, (3, 3), padding='same',
                                 use_bias=False,
                                 kernel_regularizer=l2(l2_reg),
                                 kernel_initializer='he_normal')(tensor)
        return BatchNormalization()(tensor)

    def _shortcut(tensor, filters):
        # 1x1 strided projection shortcut (unregularized, as in the
        # original code) used by every down-sampling residual block.
        shortcut = Conv2D(filters, (1, 1), strides=(2, 2),
                          padding='same', use_bias=False)(tensor)
        return BatchNormalization()(shortcut)

    img_input = Input(shape=input_shape)

    # Entry flow: plain convolutional stem.
    x = Activation('relu')(_conv_bn(img_input, 32, strides=(2, 2)))
    x = Activation('relu')(_conv_bn(x, 64))

    # Entry flow: three down-sampling residual blocks.
    for i, filters in enumerate([128, 256, 728]):
        residual = _shortcut(x, filters)
        if i > 0:
            # The very first block has no leading activation (the stem's
            # ReLU directly precedes it), matching the original layout.
            x = Activation('relu')(x)
        x = _sep_bn(x, filters)
        x = Activation('relu')(x)
        x = _sep_bn(x, filters)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = layers.add([x, residual])

    # Middle flow: eight identical residual blocks of three
    # ReLU -> separable-conv -> BatchNorm stages at 728 filters.
    for _ in range(8):
        residual = x
        for _ in range(3):
            x = Activation('relu')(x)
            x = _sep_bn(x, 728)
        x = layers.add([x, residual])

    # Exit flow: final down-sampling residual block (728 -> 1024).
    residual = _shortcut(x, 1024)
    x = Activation('relu')(x)
    x = _sep_bn(x, 728)
    x = Activation('relu')(x)
    x = _sep_bn(x, 1024)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # Exit flow: widening separable convs, then the classifier head.
    for filters in (1536, 2048):
        x = Activation('relu')(_sep_bn(x, filters))
    x = GlobalAveragePooling2D()(x)
    x = Dense(num_classes, activation='softmax')(x)
    return Model(img_input, x)
#input_shape = (229, 229, 3)
#num_classes = 6
#Xception(input_shape, num_classes).summary()
| 44.436681
| 90
| 0.595814
| 1,230
| 10,176
| 4.769919
| 0.045528
| 0.040225
| 0.081814
| 0.116584
| 0.921254
| 0.909835
| 0.909835
| 0.909835
| 0.902506
| 0.902506
| 0
| 0.047332
| 0.250491
| 10,176
| 229
| 91
| 44.436681
| 0.721909
| 0.009237
| 0
| 0.884817
| 0
| 0
| 0.064867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005236
| false
| 0
| 0.026178
| 0
| 0.036649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8c39a3a832ec12d600e09a3b4aa6062b09f2f7b
| 5,773
|
py
|
Python
|
config.py
|
seo3650/video-question-answering
|
fd796a5b470bb1303ebeb3d8de957451ec83700d
|
[
"MIT"
] | 59
|
2017-09-19T03:02:18.000Z
|
2019-11-28T01:57:48.000Z
|
config.py
|
seo3650/video-question-answering
|
fd796a5b470bb1303ebeb3d8de957451ec83700d
|
[
"MIT"
] | null | null | null |
config.py
|
seo3650/video-question-answering
|
fd796a5b470bb1303ebeb3d8de957451ec83700d
|
[
"MIT"
] | 16
|
2017-11-01T00:16:36.000Z
|
2019-10-20T08:43:56.000Z
|
"""Configuration for models."""
import os
import tensorflow as tf
# Feature-extractor settings shared by the ESA/EMN/EVQA models (a single
# pre-extracted video feature stream) and by the GRA model (separate
# appearance/motion streams).
_VIDEO_FEATURES = {'video_feature_dim': 4096, 'video_feature_num': 20}
_APPEAR_MOTION_FEATURES = {
    'appear_dim': 4096,
    'frame_num': 20,
    'motion_dim': 4096,
    'clip_num': 20,
}

# Vocabulary size per dataset; the embedding file lives under data/<dataset>/.
_VOCAB_NUM = {'msvd_qa': 4000, 'msrvtt_qa': 8000}


def _model_conf(dataset, features, dim_key):
    """Build the 'model' sub-config for *dataset*.

    *features* supplies the video-feature keys and *dim_key* names the
    hidden-dimension entry ('common_dim' or 'memory_dim', always 256).
    """
    conf = {
        'word_dim': 300,
        'vocab_num': _VOCAB_NUM[dataset],
        'pretrained_embedding': 'data/%s/word_embedding.npy' % dataset,
    }
    conf.update(features)
    conf['answer_num'] = 1000
    conf[dim_key] = 256
    return conf


def _entry(dataset, features, dim_key, train):
    """Build one '<config_id>' entry ('0' is the only id used so far)."""
    return {'0': {'model': _model_conf(dataset, features, dim_key),
                  'train': train}}


def _std_model(dim_key):
    """Per-dataset config for the standard (single video stream) models.

    All of them train with batch size 32 / L2 coeff 1e-5 on MSVD-QA and
    batch size 64 / L2 coeff 1e-6 on MSRVTT-QA.
    """
    return {
        'msvd_qa': _entry('msvd_qa', _VIDEO_FEATURES, dim_key,
                          {'batch_size': 32, 'reg_coeff': 1e-5,
                           'learning_rate': 0.001}),
        'msrvtt_qa': _entry('msrvtt_qa', _VIDEO_FEATURES, dim_key,
                            {'batch_size': 64, 'reg_coeff': 1e-6,
                             'learning_rate': 0.001}),
    }


# CONFIG[model][dataset][config_id] -> {'model': ..., 'train': ...}.
# EMN stores its hidden size under 'memory_dim'; the others use 'common_dim'.
# GRA additionally has a shuffle-loss coefficient ('shu_coeff').
CONFIG = {
    'esa': _std_model('common_dim'),
    'emn': _std_model('memory_dim'),
    'evqa': _std_model('common_dim'),
    'gra': {
        'msvd_qa': _entry('msvd_qa', _APPEAR_MOTION_FEATURES, 'common_dim',
                          {'batch_size': 32, 'reg_coeff': 1e-6,
                           'shu_coeff': 1e-5, 'learning_rate': 0.001}),
        'msrvtt_qa': _entry('msrvtt_qa', _APPEAR_MOTION_FEATURES, 'common_dim',
                            {'batch_size': 64, 'reg_coeff': 1e-7,
                             'shu_coeff': 1e-7, 'learning_rate': 0.001}),
    },
}
def get(model, dataset, config_id, gpu_list):
    """Generate configuration.

    Looks up the model/train hyper-parameters in CONFIG, attaches the
    dataset's preprocessing directory (for the known datasets) and a
    TensorFlow session config restricted to *gpu_list*.
    """
    entry = CONFIG[model][dataset][config_id]

    # Known dataset -> preprocessing directory; unknown datasets simply
    # get no 'preprocess_dir' key, as before.
    preprocess_dirs = {
        'msvd_qa': 'data/msvd_qa',
        'msrvtt_qa': 'data/msrvtt_qa',
    }

    config = {}
    if dataset in preprocess_dirs:
        config['preprocess_dir'] = preprocess_dirs[dataset]
    config['model'] = entry['model']
    config['train'] = entry['train']

    # Grow GPU memory on demand and expose only the requested devices.
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.visible_device_list = gpu_list
    config['session'] = sess_config
    return config
| 31.037634
| 80
| 0.364802
| 455
| 5,773
| 4.316484
| 0.171429
| 0.07332
| 0.032587
| 0.04888
| 0.809572
| 0.757637
| 0.757637
| 0.757637
| 0.757637
| 0.757637
| 0
| 0.08908
| 0.517755
| 5,773
| 185
| 81
| 31.205405
| 0.616379
| 0.008488
| 0
| 0.657143
| 0
| 0
| 0.26711
| 0.04481
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005714
| false
| 0
| 0.011429
| 0
| 0.022857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e44ce3ad007c9a9079e2a432648dcd4ccaebe52
| 5,734
|
py
|
Python
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 3
|
2020-03-07T21:54:09.000Z
|
2020-06-11T02:23:58.000Z
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 2
|
2020-03-08T00:35:51.000Z
|
2020-07-19T23:42:13.000Z
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 1
|
2020-03-07T21:49:07.000Z
|
2020-03-07T21:49:07.000Z
|
"""1.2.840.10008.1.2.4.50 - JPEG Baseline (Process 1)"""
def _baseline_entry(samples, photometric, frames='1', rows=100, columns=100,
                    **extra):
    """Build one index entry for a JPEG Baseline (Process 1) test file.

    Every file in this directory shares the same transfer syntax UID and
    8-bit unsigned pixel layout; only samples-per-pixel, photometric
    interpretation, frame count and image size vary.  Values are stored as
    ``(VR, value)`` tuples.  Extra elements (e.g. ``ImageComments``) are
    passed pre-wrapped as ``(VR, value)`` tuples via keyword arguments.
    """
    entry = {
        'TransferSyntaxUID': ('UI', '1.2.840.10008.1.2.4.50'),
        'SamplesPerPixel': ('US', samples),
        'PhotometricInterpretation': ('CS', photometric),
        'PlanarConfiguration': ('US', 0),
        'NumberOfFrames': ('IS', frames),
        'Rows': ('US', rows),
        'Columns': ('US', columns),
        'BitsAllocated': ('US', 8),
        'BitsStored': ('US', 8),
        'HighBit': ('US', 7),
        'PixelRepresentation': ('US', 0),
    }
    entry.update(extra)
    return entry


# File name -> expected DICOM element values for the JPEG Baseline datasets.
INDEX = {
    "color3d_jpeg_baseline.dcm": _baseline_entry(
        3, 'YBR_FULL_422', frames='120', rows=480, columns=640),
    "JPEGBaseline_1s_1f_u_08_08.dcm": _baseline_entry(
        1, 'MONOCHROME2',
        ImageComments=(
            'LT',
            (
                "Created from SC_rgb_jpeg_dcmtk.dcm using IJG's cjpeg with "
                "-grayscale and -baseline flags"
            )
        )),
    'SC_rgb_dcmtk_+eb+cr.dcm': _baseline_entry(3, 'RGB'),
    'SC_rgb_dcmtk_+eb+cy+n1.dcm': _baseline_entry(3, 'YBR_FULL'),
    'SC_rgb_dcmtk_+eb+cy+n2.dcm': _baseline_entry(3, 'YBR_FULL'),
    'SC_rgb_dcmtk_+eb+cy+np.dcm': _baseline_entry(3, 'YBR_FULL_422'),
    'SC_rgb_dcmtk_+eb+cy+s2.dcm': _baseline_entry(3, 'YBR_FULL_422'),
    'SC_rgb_dcmtk_+eb+cy+s4.dcm': _baseline_entry(3, 'YBR_FULL'),
    'SC_rgb_jpeg_dcmtk.dcm': _baseline_entry(3, 'YBR_FULL'),
    'SC_rgb_jpeg_lossy_gdcm.dcm': _baseline_entry(3, 'RGB'),
    'SC_rgb_small_odd_jpeg.dcm': _baseline_entry(3, 'YBR_FULL',
                                                 rows=3, columns=3),
}
| 36.993548
| 76
| 0.465992
| 551
| 5,734
| 4.758621
| 0.128857
| 0.018307
| 0.022883
| 0.045767
| 0.903509
| 0.895118
| 0.895118
| 0.895118
| 0.889397
| 0.889397
| 0
| 0.084985
| 0.296128
| 5,734
| 154
| 77
| 37.233766
| 0.564668
| 0.00872
| 0
| 0.75
| 0
| 0
| 0.461958
| 0.144065
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e6022f20ff980ed2ff22e2f8e9f752cf95febf2
| 112
|
py
|
Python
|
example.py
|
EMCain/python-demo-dates
|
d4d198cd916bd395ea89744af2057610eef64cb0
|
[
"MIT"
] | null | null | null |
example.py
|
EMCain/python-demo-dates
|
d4d198cd916bd395ea89744af2057610eef64cb0
|
[
"MIT"
] | null | null | null |
example.py
|
EMCain/python-demo-dates
|
d4d198cd916bd395ea89744af2057610eef64cb0
|
[
"MIT"
] | null | null | null |
from src.info_string import this_year_info_string


def main():
    """Print the demo's year-info string to stdout."""
    print(this_year_info_string())


if __name__ == "__main__":
    main()
| 28
| 49
| 0.794643
| 17
| 112
| 4.352941
| 0.647059
| 0.405405
| 0.324324
| 0.486486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116071
| 112
| 4
| 50
| 28
| 0.747475
| 0
| 0
| 0
| 0
| 0
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3e7cf7c8821734d25608263070cef557f49ca488
| 23,674
|
py
|
Python
|
sdk/python/pulumi_cloudflare/logpush_job.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 35
|
2019-03-14T21:29:29.000Z
|
2022-03-30T00:00:59.000Z
|
sdk/python/pulumi_cloudflare/logpush_job.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 128
|
2019-03-08T23:45:58.000Z
|
2022-03-31T21:05:22.000Z
|
sdk/python/pulumi_cloudflare/logpush_job.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2019-05-10T12:52:56.000Z
|
2020-03-24T15:02:14.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['LogpushJobArgs', 'LogpushJob']
@pulumi.input_type
class LogpushJobArgs:
    # NOTE: this class is machine-generated by the Pulumi Terraform Bridge
    # (tfgen) -- see the file header.  Hand edits will be lost when the SDK
    # is regenerated.  It carries the *input* properties for creating a
    # LogpushJob; each property is stored via pulumi.set/pulumi.get under a
    # snake_case key, with the wire (camelCase) name supplied to
    # @pulumi.getter where the two differ.
    def __init__(__self__, *,
                 dataset: pulumi.Input[str],
                 destination_conf: pulumi.Input[str],
                 zone_id: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None,
                 logpull_options: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 ownership_challenge: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a LogpushJob resource.
        :param pulumi.Input[str] dataset: Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
        :param pulumi.Input[str] destination_conf: Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
        :param pulumi.Input[str] zone_id: The zone ID where the logpush job should be created.
        :param pulumi.Input[bool] enabled: Whether to enable the job.
        :param pulumi.Input[str] logpull_options: Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
        :param pulumi.Input[str] name: The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
        :param pulumi.Input[str] ownership_challenge: Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
               Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
        """
        # Required inputs are always stored; optional ones only when the
        # caller supplied them, so unset values stay absent from the input map.
        pulumi.set(__self__, "dataset", dataset)
        pulumi.set(__self__, "destination_conf", destination_conf)
        pulumi.set(__self__, "zone_id", zone_id)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if logpull_options is not None:
            pulumi.set(__self__, "logpull_options", logpull_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if ownership_challenge is not None:
            pulumi.set(__self__, "ownership_challenge", ownership_challenge)

    @property
    @pulumi.getter
    def dataset(self) -> pulumi.Input[str]:
        """
        Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
        """
        return pulumi.get(self, "dataset")

    @dataset.setter
    def dataset(self, value: pulumi.Input[str]):
        pulumi.set(self, "dataset", value)

    @property
    @pulumi.getter(name="destinationConf")
    def destination_conf(self) -> pulumi.Input[str]:
        """
        Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
        """
        return pulumi.get(self, "destination_conf")

    @destination_conf.setter
    def destination_conf(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination_conf", value)

    @property
    @pulumi.getter(name="zoneId")
    def zone_id(self) -> pulumi.Input[str]:
        """
        The zone ID where the logpush job should be created.
        """
        return pulumi.get(self, "zone_id")

    @zone_id.setter
    def zone_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "zone_id", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable the job.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="logpullOptions")
    def logpull_options(self) -> Optional[pulumi.Input[str]]:
        """
        Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
        """
        return pulumi.get(self, "logpull_options")

    @logpull_options.setter
    def logpull_options(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logpull_options", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="ownershipChallenge")
    def ownership_challenge(self) -> Optional[pulumi.Input[str]]:
        """
        Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
        Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
        """
        return pulumi.get(self, "ownership_challenge")

    @ownership_challenge.setter
    def ownership_challenge(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ownership_challenge", value)
@pulumi.input_type
class _LogpushJobState:
    # NOTE: machine-generated by the Pulumi Terraform Bridge (tfgen) -- see
    # the file header; hand edits will be lost on regeneration.  Unlike
    # LogpushJobArgs, every property here is Optional because state lookups
    # may return a partial view of the resource.
    def __init__(__self__, *,
                 dataset: Optional[pulumi.Input[str]] = None,
                 destination_conf: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 logpull_options: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 ownership_challenge: Optional[pulumi.Input[str]] = None,
                 zone_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering LogpushJob resources.
        :param pulumi.Input[str] dataset: Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
        :param pulumi.Input[str] destination_conf: Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
        :param pulumi.Input[bool] enabled: Whether to enable the job.
        :param pulumi.Input[str] logpull_options: Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
        :param pulumi.Input[str] name: The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
        :param pulumi.Input[str] ownership_challenge: Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
               Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
        :param pulumi.Input[str] zone_id: The zone ID where the logpush job should be created.
        """
        # Every field is optional: only values actually supplied are stored,
        # so absent state stays absent from the filter map.
        if dataset is not None:
            pulumi.set(__self__, "dataset", dataset)
        if destination_conf is not None:
            pulumi.set(__self__, "destination_conf", destination_conf)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if logpull_options is not None:
            pulumi.set(__self__, "logpull_options", logpull_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if ownership_challenge is not None:
            pulumi.set(__self__, "ownership_challenge", ownership_challenge)
        if zone_id is not None:
            pulumi.set(__self__, "zone_id", zone_id)

    @property
    @pulumi.getter
    def dataset(self) -> Optional[pulumi.Input[str]]:
        """
        Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
        """
        return pulumi.get(self, "dataset")

    @dataset.setter
    def dataset(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dataset", value)

    @property
    @pulumi.getter(name="destinationConf")
    def destination_conf(self) -> Optional[pulumi.Input[str]]:
        """
        Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
        """
        return pulumi.get(self, "destination_conf")

    @destination_conf.setter
    def destination_conf(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_conf", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable the job.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="logpullOptions")
    def logpull_options(self) -> Optional[pulumi.Input[str]]:
        """
        Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
        """
        return pulumi.get(self, "logpull_options")

    @logpull_options.setter
    def logpull_options(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logpull_options", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="ownershipChallenge")
    def ownership_challenge(self) -> Optional[pulumi.Input[str]]:
        """
        Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
        Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
        """
        return pulumi.get(self, "ownership_challenge")

    @ownership_challenge.setter
    def ownership_challenge(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ownership_challenge", value)

    @property
    @pulumi.getter(name="zoneId")
    def zone_id(self) -> Optional[pulumi.Input[str]]:
        """
        The zone ID where the logpush job should be created.
        """
        return pulumi.get(self, "zone_id")

    @zone_id.setter
    def zone_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zone_id", value)
class LogpushJob(pulumi.CustomResource):
    # Typing-only overload: construct from individual keyword arguments.
    # The body is `...` by design -- dispatch happens in the real __init__.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dataset: Optional[pulumi.Input[str]] = None,
                 destination_conf: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 logpull_options: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 ownership_challenge: Optional[pulumi.Input[str]] = None,
                 zone_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a LogpushJob resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dataset: Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
        :param pulumi.Input[str] destination_conf: Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
        :param pulumi.Input[bool] enabled: Whether to enable the job.
        :param pulumi.Input[str] logpull_options: Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
        :param pulumi.Input[str] name: The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
        :param pulumi.Input[str] ownership_challenge: Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
               Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
        :param pulumi.Input[str] zone_id: The zone ID where the logpush job should be created.
        """
        ...
@overload
def __init__(__self__,
             resource_name: str,
             args: LogpushJobArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Create a LogpushJob resource with the given unique name, props, and options.
    :param str resource_name: The name of the resource.
    :param LogpushJobArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Typing-only overload (args-object form); the non-overloaded
    # __init__ below performs the actual dispatch at runtime.
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch construction to ``_internal_init``.

    Accepts either a single ``LogpushJobArgs`` bundle or the individual
    keyword arguments declared by the typed overloads above.
    """
    resource_args, opts = _utilities.get_resource_args_opts(LogpushJobArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Caller used the keyword-argument form; forward everything as-is.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Caller used the args-object form; unpack its fields as keywords.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
        resource_name: str,
        opts: Optional[pulumi.ResourceOptions] = None,
        dataset: Optional[pulumi.Input[str]] = None,
        destination_conf: Optional[pulumi.Input[str]] = None,
        enabled: Optional[pulumi.Input[bool]] = None,
        logpull_options: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        ownership_challenge: Optional[pulumi.Input[str]] = None,
        zone_id: Optional[pulumi.Input[str]] = None,
        __props__=None):
    # Shared initializer behind both __init__ overloads: validates options,
    # builds the property bag, and registers the resource with the engine.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Pin the provider plugin version when the caller didn't specify one.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No opts.id means we are creating a new resource, so a pre-built
        # __props__ bag (used when adopting an existing resource) is invalid.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = LogpushJobArgs.__new__(LogpushJobArgs)

        # dataset, destination_conf and zone_id are required unless we are
        # looking up an existing resource by URN.
        if dataset is None and not opts.urn:
            raise TypeError("Missing required property 'dataset'")
        __props__.__dict__["dataset"] = dataset
        if destination_conf is None and not opts.urn:
            raise TypeError("Missing required property 'destination_conf'")
        __props__.__dict__["destination_conf"] = destination_conf
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["logpull_options"] = logpull_options
        __props__.__dict__["name"] = name
        __props__.__dict__["ownership_challenge"] = ownership_challenge
        if zone_id is None and not opts.urn:
            raise TypeError("Missing required property 'zone_id'")
        __props__.__dict__["zone_id"] = zone_id
    # Register with the Pulumi engine under the provider's type token.
    super(LogpushJob, __self__).__init__(
        'cloudflare:index/logpushJob:LogpushJob',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        dataset: Optional[pulumi.Input[str]] = None,
        destination_conf: Optional[pulumi.Input[str]] = None,
        enabled: Optional[pulumi.Input[bool]] = None,
        logpull_options: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        ownership_challenge: Optional[pulumi.Input[str]] = None,
        zone_id: Optional[pulumi.Input[str]] = None) -> 'LogpushJob':
    r"""
    Get an existing LogpushJob resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] dataset: Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
    :param pulumi.Input[str] destination_conf: Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
    :param pulumi.Input[bool] enabled: Whether to enable the job.
    :param pulumi.Input[str] logpull_options: Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
    :param pulumi.Input[str] name: The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
    :param pulumi.Input[str] ownership_challenge: Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
           Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
    :param pulumi.Input[str] zone_id: The zone ID where the logpush job should be created.
    """
    # Merge the provider ID into the resource options so the engine performs
    # a lookup rather than a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Populate the state bag from the optional qualifier arguments.
    state = _LogpushJobState.__new__(_LogpushJobState)
    for prop, value in (
            ("dataset", dataset),
            ("destination_conf", destination_conf),
            ("enabled", enabled),
            ("logpull_options", logpull_options),
            ("name", name),
            ("ownership_challenge", ownership_challenge),
            ("zone_id", zone_id)):
        state.__dict__[prop] = value
    return LogpushJob(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter
def dataset(self) -> pulumi.Output[str]:
    """
    Which type of dataset resource to use. Available values are `"firewall_events"`, `"http_requests"`, `"spectrum_events"` and `"nel_reports"`.
    """
    # Resolved output value is read from the Pulumi runtime's property bag.
    return pulumi.get(self, "dataset")
@property
@pulumi.getter(name="destinationConf")
def destination_conf(self) -> pulumi.Output[str]:
    """
    Uniquely identifies a resource (such as an s3 bucket) where data will be pushed. Additional configuration parameters supported by the destination may be included. See [Logpush destination documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#destination).
    """
    # Maps the snake_case Python name onto the provider's "destinationConf" key.
    return pulumi.get(self, "destination_conf")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether to enable the job.
    """
    # Optional property: resolves to None when not set on the resource.
    return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="logpullOptions")
def logpull_options(self) -> pulumi.Output[Optional[str]]:
    """
    Configuration string for the Logshare API. It specifies things like requested fields and timestamp formats. See [Logpull options documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#options).
    """
    # Maps the snake_case Python name onto the provider's "logpullOptions" key.
    return pulumi.get(self, "logpull_options")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
    r"""
    The name of the logpush job to create. Must match the regular expression `^[a-zA-Z0-9\-\.]*$`.
    """
    # Raw docstring: `\-` and `\.` are not valid Python string escapes and
    # would otherwise raise a DeprecationWarning on newer interpreters.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownershipChallenge")
def ownership_challenge(self) -> pulumi.Output[Optional[str]]:
    """
    Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage,
    Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
    """
    # Maps the snake_case Python name onto the provider's "ownershipChallenge" key.
    return pulumi.get(self, "ownership_challenge")
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Output[str]:
    """
    The zone ID where the logpush job should be created.
    """
    # Maps the snake_case Python name onto the provider's "zoneId" key.
    return pulumi.get(self, "zone_id")
| 54.050228
| 367
| 0.678339
| 2,778
| 23,674
| 5.612671
| 0.074874
| 0.067022
| 0.071832
| 0.063494
| 0.892124
| 0.873461
| 0.857491
| 0.838956
| 0.834531
| 0.820357
| 0
| 0.001561
| 0.215131
| 23,674
| 437
| 368
| 54.173913
| 0.837576
| 0.441032
| 0
| 0.722433
| 1
| 0
| 0.095855
| 0.003095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159696
| false
| 0.003802
| 0.019011
| 0
| 0.273764
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e4911746a360397602d4823372d2b73e519e7dfb
| 149
|
py
|
Python
|
tests/distributed/test_against_external_daemon/tf_encoder_ws/tf_encoder.py
|
Karnak123/jina
|
9eba4feb3afa2e49c779b46f77c5022bdcb944aa
|
[
"Apache-2.0"
] | 4
|
2021-06-29T04:14:31.000Z
|
2021-08-01T07:01:08.000Z
|
tests/distributed/test_against_external_daemon/tf_encoder_ws/tf_encoder.py
|
Karnak123/jina
|
9eba4feb3afa2e49c779b46f77c5022bdcb944aa
|
[
"Apache-2.0"
] | 1
|
2021-12-25T09:06:13.000Z
|
2021-12-25T09:06:13.000Z
|
tests/distributed/test_against_external_daemon/tf_encoder_ws/tf_encoder.py
|
Karnak123/jina
|
9eba4feb3afa2e49c779b46f77c5022bdcb944aa
|
[
"Apache-2.0"
] | 1
|
2021-06-29T17:03:24.000Z
|
2021-06-29T17:03:24.000Z
|
from jina import Executor, requests
class TFEncoder(Executor):
    """Minimal Executor stub used in a distributed test to verify that
    TensorFlow can be imported inside the remote daemon's environment."""
    @requests
    def foo(*args, **kwargs):
        # NOTE(review): no explicit `self` — *args absorbs the instance when
        # the handler is invoked; presumably intentional for this stub.
        # Importing tensorflow at call time is the whole point of the test:
        # it fails loudly if the dependency is missing in the target env.
        import tensorflow
        pass
| 14.9
| 35
| 0.644295
| 16
| 149
| 6
| 0.8125
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275168
| 149
| 9
| 36
| 16.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
e49f2695797f3975634eb3a99ddc39ebcff08381
| 8,158
|
py
|
Python
|
scheduler/period_scheduler.py
|
CptnDuras/saltshaker_api
|
c89d91b95a8930364c125e286b94f1e7f060fef0
|
[
"MIT"
] | 115
|
2018-03-26T11:27:49.000Z
|
2019-06-06T16:03:43.000Z
|
scheduler/period_scheduler.py
|
CptnDuras/saltshaker_api
|
c89d91b95a8930364c125e286b94f1e7f060fef0
|
[
"MIT"
] | 16
|
2019-07-10T10:09:23.000Z
|
2021-07-02T10:30:48.000Z
|
scheduler/period_scheduler.py
|
CptnDuras/saltshaker_api
|
c89d91b95a8930364c125e286b94f1e7f060fef0
|
[
"MIT"
] | 52
|
2019-06-27T02:58:30.000Z
|
2021-11-12T03:36:05.000Z
|
# -*- coding:utf-8 -*-
from common.log import loggers
from extensions import scheduler
import time
logger = loggers()
# Register a one-off, date-triggered job
def scheduler_timing_add(period_id, product_id, user, run_date):
    """Schedule ``tasks.tasks:job`` to run once at *run_date*.

    :param period_id: used both as the job id and as a job argument
    :param run_date: e.g. "2018-07-04 14:01:00"
    :return: ``{"status": bool, "message": str}``
    """
    outcome = {"status": True, "message": ""}
    try:
        scheduler.add_job(func="tasks.tasks:job", trigger='date', run_date=run_date,
                          args=[period_id, product_id, user], id=period_id)
    except Exception as e:
        logger.error("Add timing scheduler error: %s" % e)
        outcome = {"status": False, "message": str(e)}
    return outcome
# Modify a one-off, date-triggered job; re-create it if it no longer exists
def scheduler_timing_modify(period_id, product_id, user, run_date):
    """Update the run date of the one-off job *period_id*.

    If the job has already disappeared from the scheduler, fall back to
    creating it from scratch via ``scheduler_timing_add``.

    :param run_date: e.g. "2018-07-04 14:01:00"
    :return: ``{"status": bool, "message": str}``
    """
    try:
        scheduler.modify_job(func="tasks.tasks:job", trigger='date', run_date=run_date,
                             args=[period_id, product_id, user], id=period_id)
    except Exception as e:
        logger.error("Modify timing scheduler error: %s" % e)
        # The original job may be gone — add it again as a fresh job.
        fallback = scheduler_timing_add(period_id, product_id, user, run_date)
        if fallback.get("status") is True:
            return {"status": True, "message": ""}
        return {"status": False, "message": fallback.get("message")}
    return {"status": True, "message": ""}
# Register a recurring interval job
def scheduler_interval_add(period_id, product_id, user, run_interval, interval):
    """Schedule ``tasks.tasks:job`` to run every *run_interval* units.

    :param period_id: used both as the job id and as a job argument
    :param run_interval: number of units between runs
    :param interval: unit name — "second", "minute", "hour", "day" or "week"
    :return: ``{"status": bool, "message": str}``
    """
    # Map the unit name to the APScheduler interval-trigger keyword; this
    # collapses five previously copy-pasted branches into one code path.
    unit_to_kwarg = {
        "second": "seconds",
        "minute": "minutes",
        "hour": "hours",
        "day": "days",
        "week": "weeks",
    }
    kwarg = unit_to_kwarg.get(interval)
    if kwarg is None:
        return {"status": False, "message": "No interval specified"}
    try:
        scheduler.add_job(func="tasks.tasks:job", trigger='interval',
                          args=[period_id, product_id, user], id=period_id,
                          **{kwarg: run_interval})
        return {"status": True, "message": ""}
    except Exception as e:
        # Same per-unit message as before, e.g. "Add second period scheduler error: ..."
        logger.error("Add %s period scheduler error: %s" % (interval, e))
        return {"status": False, "message": str(e)}
# Modify a recurring interval job; re-create it if it no longer exists
def scheduler_interval_modify(period_id, product_id, user, run_interval, interval):
    """Update the interval of the recurring job *period_id*.

    If modification fails (e.g. the job no longer exists in the scheduler),
    fall back to re-creating it via ``scheduler_interval_add``.

    :param run_interval: number of units between runs
    :param interval: unit name — "second", "minute", "hour", "day" or "week"
    :return: ``{"status": bool, "message": str}``
    """
    # Map the unit name to the APScheduler interval-trigger keyword; this
    # collapses five previously copy-pasted branches into one code path.
    unit_to_kwarg = {
        "second": "seconds",
        "minute": "minutes",
        "hour": "hours",
        "day": "days",
        "week": "weeks",
    }
    kwarg = unit_to_kwarg.get(interval)
    if kwarg is None:
        return {"status": False, "message": "No interval specified"}
    try:
        scheduler.modify_job(func="tasks.tasks:job", trigger='interval',
                             args=[period_id, product_id, user], id=period_id,
                             **{kwarg: run_interval})
        return {"status": True, "message": ""}
    except Exception as e:
        # Same per-unit message as before, e.g. "Modify second period scheduler error: ..."
        logger.error("Modify %s period scheduler error: %s" % (interval, e))
        # The job may be gone — add it again as a fresh job.
        scheduler_result = scheduler_interval_add(period_id, product_id, user, run_interval, interval)
        if scheduler_result.get("status") is not True:
            return {"status": False, "message": scheduler_result.get("message")}
        return {"status": True, "message": ""}
# Delete a scheduled job
def scheduler_delete(period_id):
    """Remove the job identified by *period_id* from the scheduler.

    :return: ``{"status": bool, "message": str}``
    """
    try:
        scheduler.delete_job(id=period_id)
    except Exception as e:
        logger.error("Delete period scheduler error: %s" % e)
        return {"status": False, "message": str(e)}
    return {"status": True, "message": ""}
# Pause a scheduled job
def scheduler_pause(period_id):
    """Pause the job identified by *period_id*.

    :return: ``{"status": bool, "message": str}``
    """
    try:
        scheduler.pause_job(id=period_id)
    except Exception as e:
        logger.error("Pause period scheduler error: %s" % e)
        return {"status": False, "message": str(e)}
    return {"status": True, "message": ""}
# Resume a paused job
def scheduler_resume(period_id):
    """Resume the (previously paused) job identified by *period_id*.

    :return: ``{"status": bool, "message": str}``
    """
    try:
        scheduler.resume_job(id=period_id)
    except Exception as e:
        logger.error("Resume period scheduler error: %s" % e)
        return {"status": False, "message": str(e)}
    return {"status": True, "message": ""}
| 46.090395
| 106
| 0.589238
| 942
| 8,158
| 4.950106
| 0.082803
| 0.068625
| 0.07077
| 0.080206
| 0.921295
| 0.907999
| 0.898134
| 0.895561
| 0.875402
| 0.873472
| 0
| 0.004934
| 0.279603
| 8,158
| 177
| 107
| 46.090395
| 0.788498
| 0.020103
| 0
| 0.721854
| 0
| 0
| 0.182889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046358
| false
| 0
| 0.019868
| 0
| 0.317881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90397596389f9c7fffcb6bf3d24b40cd9afa2767
| 63,181
|
py
|
Python
|
great_international/migrations/0082_auto_20190913_1339.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
great_international/migrations/0082_auto_20190913_1339.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
great_international/migrations/0082_auto_20190913_1339.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
# Generated by Django 2.2.4 on 2019-09-13 13:39
import core.model_fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('great_international', '0081_auto_20190910_1233'),
]
operations = [
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_four_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_one_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_three_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
# Auto-generated Django migration operations (output of `makemigrations`) for the
# investhighpotentialopportunitydetailpage model. Do not hand-edit the field
# definitions below — regenerate the migration instead.
#
# Pattern: each image-alt field is added as a base TextField plus one copy per
# language suffix (_ar, _de, _en_gb, _es, _fr, _ja, _pt, _zh_hans). The per-language
# copies carry null=True (the base field does not), so existing rows remain valid
# when a translation is absent.
# NOTE(review): the suffix scheme looks like django-modeltranslation output — confirm
# against the app's translation registration before relying on it.
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='case_study_two_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_eight_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_five_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_four_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_one_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_seven_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
# Video-transcript fields use the project's Markdown field type
# (core.model_fields.MarkdownField) instead of a plain TextField; the same
# per-language copy pattern follows. Note the base transcript field is also
# null=True, unlike the base image-alt fields above.
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_ar',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_de',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_en_gb',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_es',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_fr',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_ja',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_pt',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_video_transcript_zh_hans',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_ar',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_de',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_en_gb',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_es',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_fr',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_ja',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_pt',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_two_video_transcript_zh_hans',
field=core.model_fields.MarkdownField(blank=True, help_text='If the video is present, a transcript must be provided.', null=True),
),
]
| 81.001282
| 264
| 0.703107
| 8,607
| 63,181
| 5.029046
| 0.011038
| 0.063625
| 0.081298
| 0.095437
| 0.995795
| 0.995795
| 0.995795
| 0.995795
| 0.995795
| 0.995795
| 0
| 0.000631
| 0.22301
| 63,181
| 779
| 265
| 81.105263
| 0.881098
| 0.000712
| 0
| 0.791721
| 1
| 0.174644
| 0.6053
| 0.178763
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002587
| 0
| 0.006468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5f55ec135d26727d3b92658cb2ff2ab7f6200696
| 43,480
|
py
|
Python
|
Video-Person-ReID/models/ResNet.py
|
anurag3/2019-CVPR-AIC-Track-2-UWIPL
|
61ee2c96611e10fe51a52033b1cd0e2804d544ca
|
[
"MIT"
] | 20
|
2019-06-05T08:43:26.000Z
|
2021-12-07T08:48:18.000Z
|
Video-Person-ReID/models/ResNet.py
|
yizhou-wang/2019-CVPR-AIC-Track-2-UWIPL-ETRI
|
387924b1e33e0594977cd095c26a147e4a7f8192
|
[
"MIT"
] | 8
|
2019-10-05T11:17:11.000Z
|
2020-04-04T00:40:20.000Z
|
Video-Person-ReID/models/ResNet.py
|
yizhou-wang/2019-CVPR-AIC-Track-2-UWIPL-ETRI
|
387924b1e33e0594977cd095c26a147e4a7f8192
|
[
"MIT"
] | 14
|
2019-06-16T23:09:15.000Z
|
2021-09-13T08:36:50.000Z
|
from __future__ import absolute_import
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torchvision
#from models.lightcnn import * # JR
#import torch.nn.parallel.data_parallel as DataParallel # JR
from collections import OrderedDict # JR
__all__ = ['ResNet50TP', 'ResNet50TA', 'myResNet50TA', 'ResNet50RNN', 'ResNet50TP_ORIENTATION', 'ResNet50TP_ORIENTATION_IOU', 'ResNet50TA_ORIENTATION', 'ResNet50TA_ORIENTATION_IOU', 'ResNet50TA_SURFACE', 'ResNet50TA_SURFACE_NU', 'ResNet50TA_SURFACE_NU4', 'ResNet50TA_SURFACE_NU2', 'ResNet50TA_SURFACE_NU2F1', 'ResNet50TA_SURFACE_N1', 'ResNet50TA_SURFACE_N2']
class ResNet50TP_ORIENTATION_IOU(nn.Module):
    """ResNet-50 with temporal average pooling; the pooled appearance
    feature is extended with per-track orientation and frame-IOU values
    before classification (hence the ``feat_dim + 2`` classifier)."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TP_ORIENTATION_IOU, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        # drop the final avg-pool and fc layers, keep the conv trunk
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.feat_dim = 2048
        # +2 for the appended orientation and IOU columns
        self.classifier = nn.Linear(self.feat_dim + 2, num_classes)

    def forward(self, x, orientation, frame_IOU):
        batch, seq_len = x.size(0), x.size(1)
        # fold the temporal axis into the batch for the 2-D backbone
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        pooled = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        pooled = pooled.view(batch, seq_len, -1).permute(0, 2, 1)
        # average over time -> one feature vector per clip
        f = F.avg_pool1d(pooled, seq_len).view(batch, self.feat_dim)
        extras = (orientation.view(f.size(0), -1).float(),
                  frame_IOU.view(f.size(0), -1).float())
        f = torch.cat((f,) + extras, 1)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TP_ORIENTATION(nn.Module):
    """ResNet-50 with temporal average pooling whose clip feature is
    extended with a single orientation value before the classifier
    (hence the ``feat_dim + 1`` classifier input)."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TP_ORIENTATION, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        # drop the final avg-pool and fc layers, keep the conv trunk
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.feat_dim = 2048
        # +1 for the appended orientation value
        self.classifier = nn.Linear(self.feat_dim + 1, num_classes)

    def forward(self, x, orientation):
        batch, seq_len = x.size(0), x.size(1)
        # fold the temporal axis into the batch for the 2-D backbone
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        pooled = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        pooled = pooled.view(batch, seq_len, -1).permute(0, 2, 1)
        # average over time -> one feature vector per clip
        f = F.avg_pool1d(pooled, seq_len).view(batch, self.feat_dim)
        f = torch.cat((f, orientation.view(f.size(0), -1).float()), 1)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TP(nn.Module):
    """Plain temporal-average-pooling ResNet-50 video feature extractor:
    per-frame backbone features are spatially pooled, then averaged over
    the sequence into one clip descriptor."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TP, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        # drop the final avg-pool and fc layers, keep the conv trunk
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.feat_dim = 2048
        self.classifier = nn.Linear(self.feat_dim, num_classes)

    def forward(self, x):
        batch, seq_len = x.size(0), x.size(1)
        # fold the temporal axis into the batch for the 2-D backbone
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        pooled = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        pooled = pooled.view(batch, seq_len, -1).permute(0, 2, 1)
        # average over time -> one feature vector per clip
        f = F.avg_pool1d(pooled, seq_len).view(batch, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_ORIENTATION_IOU1(nn.Module):
    """Temporal-attention ResNet-50 variant that appends replicated
    orientation and frame-IOU features to the per-frame descriptors
    before the attention-weighted sum.

    NOTE(review): this is a near-duplicate of ResNet50TA_ORIENTATION_IOU;
    its ``super()`` call still named that other class, which raises
    TypeError for instances of this class — fixed below.
    """

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        # BUG FIX: was super(ResNet50TA_ORIENTATION_IOU, self).__init__(),
        # i.e. a different class, which fails with
        # "super(type, obj): obj must be an instance or subtype of type".
        super(ResNet50TA_ORIENTATION_IOU1, self).__init__()
        self.loss = loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        # +8 holds the replicated orientation/IOU columns for seq_len=4
        self.classifier = nn.Linear(self.feat_dim + 8, num_classes)
        # kernel [5, 3] corresponds to a 150x75 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [5, 3])
        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)

    def forward(self, x, orientation, frame_IOU):
        b = x.size(0)
        t = x.size(1)
        # fold the temporal axis into the batch for the 2-D backbone
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        # spatial attention scores, then temporal conv over the sequence
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            a = F.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        orientation = orientation.view(x.size(0), -1).float()
        frame_IOU = frame_IOU.view(x.size(0), -1).float()
        # NOTE(review): each pass doubles the leading dim, producing 2**t
        # replicas that are then reshaped to (b, t, -1); the reshape only
        # divides evenly when t is a power of two (e.g. seq_len=4 -> 4
        # columns each, matching the classifier's "+8"). Confirm intent
        # before changing.
        for _ in range(1, t + 1):
            orientation = torch.stack((orientation, orientation))
            frame_IOU = torch.stack((frame_IOU, frame_IOU))
        orientation = orientation.view(b, t, -1)
        frame_IOU = frame_IOU.view(b, t, -1)
        x = torch.cat((x, orientation, frame_IOU), 2)
        # attention-weighted sum over the sequence
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, x.size(2))
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, x.size(2))
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_ORIENTATION_IOU(nn.Module):
    """Temporal-attention ResNet-50 whose per-frame descriptors are
    extended with replicated orientation and frame-IOU features before
    the attention-weighted sum over the sequence."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TA_ORIENTATION_IOU, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        # +8 covers the replicated orientation/IOU columns for seq_len=4
        self.classifier = nn.Linear(self.feat_dim + 8, num_classes)
        # kernel [5, 3] corresponds to a 150x75 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [5, 3])
        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)

    def forward(self, x, orientation, frame_IOU):
        batch, seq_len = x.size(0), x.size(1)
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        # spatial attention scores, then temporal conv over the sequence
        scores = F.relu(self.attention_conv(feat_maps))
        scores = scores.view(batch, seq_len, self.middle_dim).permute(0, 2, 1)
        scores = F.relu(self.attention_tconv(scores)).view(batch, seq_len)
        feats = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        if self.att_gen == 'softmax':
            weights = F.softmax(scores, dim=1)
        elif self.att_gen == 'sigmoid':
            weights = F.normalize(F.sigmoid(scores), p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        feats = feats.view(batch, seq_len, -1)
        orientation = orientation.view(feats.size(0), -1).float()
        frame_IOU = frame_IOU.view(feats.size(0), -1).float()
        # replicate by repeated stacking (2**seq_len copies), then fold
        # back onto the (batch, seq_len, -1) layout
        for _ in range(1, seq_len + 1):
            orientation = torch.stack((orientation, orientation))
            frame_IOU = torch.stack((frame_IOU, frame_IOU))
        orientation = orientation.view(batch, seq_len, -1)
        frame_IOU = frame_IOU.view(batch, seq_len, -1)
        feats = torch.cat((feats, orientation, frame_IOU), 2)
        # attention-weighted sum over the sequence
        weights = torch.unsqueeze(weights, -1).expand(batch, seq_len, feats.size(2))
        f = torch.sum(torch.mul(feats, weights), 1).view(batch, feats.size(2))
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_ORIENTATION(nn.Module):
    """Temporal-attention ResNet-50 with a single orientation value
    appended to the attention-pooled clip feature.

    The classifier takes ``feat_dim + 1`` inputs, so the orientation is
    concatenated to the pooled clip feature — mirroring
    ResNet50TP_ORIENTATION. (The original concatenated it to the
    per-frame tensor along dim 1, which fails with a size mismatch for
    any input and also left the classifier input one column short.)
    """

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TA_ORIENTATION, self).__init__()
        self.loss = loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        # +1 for the appended orientation value
        self.classifier = nn.Linear(self.feat_dim + 1, num_classes)
        # kernel [7, 4] corresponds to a 224x112 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 4])
        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)

    def forward(self, x, orientation):
        b = x.size(0)
        t = x.size(1)
        # fold the temporal axis into the batch for the 2-D backbone
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        # spatial attention scores, then temporal conv over the sequence
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            a = F.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # attention-weighted sum over the sequence
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        # BUG FIX: append orientation AFTER pooling so f matches the
        # classifier's feat_dim + 1 input width (was concatenated to the
        # (b, t, feat_dim) tensor along dim 1, a shape error).
        orientation = orientation.view(f.size(0), -1).float()
        f = torch.cat((f, orientation), 1)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE(nn.Module):
    """Temporal-attention ResNet-50 whose attention scores are computed
    from the appearance feature concatenated with an 18-d surface
    feature; with ``augf_surface`` the time-averaged surface feature is
    additionally returned during training."""

    def __init__(self, num_classes, loss={'xent'}, augf_surface=False, **kwargs):
        super(ResNet50TA_SURFACE, self).__init__()
        self.loss = loss
        self.augf_surface = augf_surface
        backbone = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # kernel [7, 7] corresponds to a 224x224 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)

    def forward(self, x, surface):
        batch, seq_len = x.size(0), x.size(1)
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        scores = F.relu(self.attention_conv(feat_maps))
        scores = scores.view(batch, seq_len, self.middle_dim)
        # the surface feature joins the attention input per frame
        surface = surface.view(batch, seq_len, self.surface_dim)
        scores = torch.cat((scores, surface), 2).permute(0, 2, 1)
        scores = F.relu(self.attention_tconv(scores)).view(batch, seq_len)
        feats = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        if self.att_gen == 'softmax':
            weights = F.softmax(scores, dim=1)
        elif self.att_gen == 'sigmoid':
            weights = F.normalize(F.sigmoid(scores), p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        feats = feats.view(batch, seq_len, -1)
        # attention-weighted sum over the sequence
        weights = torch.unsqueeze(weights, -1).expand(batch, seq_len, self.feat_dim)
        f = torch.sum(torch.mul(feats, weights), 1).view(batch, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.augf_surface:
            # time-averaged surface feature for the auxiliary output
            surface = surface.permute(0, 2, 1)
            surface = F.avg_pool1d(surface, kernel_size=seq_len)
            surface = surface.view(batch, self.surface_dim)
        if self.loss == {'xent'}:
            return y if not self.augf_surface else (y, surface)
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return (y, f) if not self.augf_surface else (y, f, surface)
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_NU(nn.Module):
    """Temporal-attention ResNet-50 where the surface feature both joins
    the attention input and, via a single linear projection, is added to
    each frame's appearance feature before pooling."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TA_SURFACE_NU, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # kernel [7, 7] corresponds to a 224x224 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)
        # projects the surface feature into the appearance space
        self.nu_surface = nn.Linear(self.surface_dim, self.feat_dim, bias=True)

    def forward(self, x, surface):
        batch, seq_len = x.size(0), x.size(1)
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        scores = F.relu(self.attention_conv(feat_maps))
        scores = scores.view(batch, seq_len, self.middle_dim)
        # the surface feature joins the attention input per frame
        surface = surface.view(batch, seq_len, self.surface_dim)
        scores = torch.cat((scores, surface), 2).permute(0, 2, 1)
        scores = F.relu(self.attention_tconv(scores)).view(batch, seq_len)
        feats = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        if self.att_gen == 'softmax':
            weights = F.softmax(scores, dim=1)
        elif self.att_gen == 'sigmoid':
            weights = F.normalize(F.sigmoid(scores), p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        feats = feats.view(batch, seq_len, -1)
        # add the projected surface feature to each frame's appearance
        projected = self.nu_surface(surface.view(batch * seq_len, self.surface_dim))
        feats = feats.add(projected.view(batch, seq_len, -1))
        # attention-weighted sum over the sequence
        weights = torch.unsqueeze(weights, -1).expand(batch, seq_len, self.feat_dim)
        f = torch.sum(torch.mul(feats, weights), 1).view(batch, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_NU4(nn.Module):
    """Identical to ResNet50TA_SURFACE_NU except the linear surface
    projection is passed through a ReLU before being added to the
    per-frame appearance features."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TA_SURFACE_NU4, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048      # appearance feature dimension
        self.middle_dim = 256     # attention middle-layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # kernel [7, 7] corresponds to a 224x224 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)
        # projects the surface feature into the appearance space
        self.nu_surface = nn.Linear(self.surface_dim, self.feat_dim, bias=True)

    def forward(self, x, surface):
        batch, seq_len = x.size(0), x.size(1)
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        scores = F.relu(self.attention_conv(feat_maps))
        scores = scores.view(batch, seq_len, self.middle_dim)
        # the surface feature joins the attention input per frame
        surface = surface.view(batch, seq_len, self.surface_dim)
        scores = torch.cat((scores, surface), 2).permute(0, 2, 1)
        scores = F.relu(self.attention_tconv(scores)).view(batch, seq_len)
        feats = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        if self.att_gen == 'softmax':
            weights = F.softmax(scores, dim=1)
        elif self.att_gen == 'sigmoid':
            weights = F.normalize(F.sigmoid(scores), p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        feats = feats.view(batch, seq_len, -1)
        # ReLU-rectified surface projection (the only difference vs NU)
        projected = F.relu(self.nu_surface(surface.view(batch * seq_len, self.surface_dim)))
        feats = feats.add(projected.view(batch, seq_len, -1))
        # attention-weighted sum over the sequence
        weights = torch.unsqueeze(weights, -1).expand(batch, seq_len, self.feat_dim)
        f = torch.sum(torch.mul(feats, weights), 1).view(batch, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_NU2(nn.Module):
    """Like ResNet50TA_SURFACE_NU, but the surface feature is projected
    into the appearance space through a two-layer ReLU MLP before being
    added to the per-frame appearance features."""

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TA_SURFACE_NU2, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(backbone.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: softmax or sigmoid
        self.feat_dim = 2048           # appearance feature dimension
        self.middle_dim = 256          # attention middle-layer dimension
        self.surface_dim = 18          # surface feature dimension
        self.surface_dim_middle = 256  # surface MLP hidden dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # kernel [7, 7] corresponds to a 224x224 input image size
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)
        # two-layer projection of the surface feature
        self.nu_surface_1 = nn.Linear(self.surface_dim, self.surface_dim_middle, bias=True)
        self.nu_surface_2 = nn.Linear(self.surface_dim_middle, self.feat_dim, bias=True)

    def forward(self, x, surface):
        batch, seq_len = x.size(0), x.size(1)
        frames = x.view(batch * seq_len, x.size(2), x.size(3), x.size(4))
        feat_maps = self.base(frames)
        scores = F.relu(self.attention_conv(feat_maps))
        scores = scores.view(batch, seq_len, self.middle_dim)
        # the surface feature joins the attention input per frame
        surface = surface.view(batch, seq_len, self.surface_dim)
        scores = torch.cat((scores, surface), 2).permute(0, 2, 1)
        scores = F.relu(self.attention_tconv(scores)).view(batch, seq_len)
        feats = F.avg_pool2d(feat_maps, feat_maps.size()[2:])
        if self.att_gen == 'softmax':
            weights = F.softmax(scores, dim=1)
        elif self.att_gen == 'sigmoid':
            weights = F.normalize(F.sigmoid(scores), p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        feats = feats.view(batch, seq_len, -1)
        # two-layer ReLU projection of the surface feature, added per frame
        hidden = F.relu(self.nu_surface_1(surface.view(batch * seq_len, self.surface_dim)))
        projected = F.relu(self.nu_surface_2(hidden))
        feats = feats.add(projected.view(batch, seq_len, -1))
        # attention-weighted sum over the sequence
        weights = torch.unsqueeze(weights, -1).expand(batch, seq_len, self.feat_dim)
        f = torch.sum(torch.mul(feats, weights), 1).view(batch, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss in ({'xent', 'htri'}, {'cent'}):
            return y, f
        raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_NU2F1(nn.Module):
    """ResNet-50 temporal-attention model with surface-feature fusion.

    Surface features (18-dim per frame) are used twice: appended to the
    attention input, and projected through a two-layer MLP whose output is
    added to the frame features, followed by one extra fused linear layer
    (``nu_surface_f1``).
    """

    def __init__(self, num_classes, loss=None, **kwargs):
        super(ResNet50TA_SURFACE_NU2F1, self).__init__()
        # None sentinel avoids a mutable default argument; the effective
        # default is still {'xent'}.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        # Keep everything up to (excluding) the global pool + fc head.
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: 'softmax' or 'sigmoid'
        self.feat_dim = 2048      # ResNet-50 final feature dimension
        self.middle_dim = 256     # attention middle layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.surface_dim_middle = 256  # hidden width of the surface MLP
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # A [7, 7] kernel corresponds to a 224x224 input (7x7 feature map).
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)
        self.nu_surface_1 = nn.Linear(self.surface_dim, self.surface_dim_middle, bias=True)
        self.nu_surface_2 = nn.Linear(self.surface_dim_middle, self.feat_dim, bias=True)
        self.nu_surface_f1 = nn.Linear(self.feat_dim, self.feat_dim, bias=True)

    def forward(self, x, surface):
        """See class docstring; x is (b, t, C, H, W), surface is (b, t, 18)."""
        b = x.size(0)
        t = x.size(1)
        # Fold the time axis into the batch so the CNN sees ordinary images.
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        # Append surface features to the attention input.
        surface = surface.view(b, t, self.surface_dim)
        a = torch.cat((a, surface), 2)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            a = torch.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # Surface MLP projection, added to the frame features, then fused
        # through one extra linear layer (the "F1" of this variant).
        surface = surface.view(b * t, self.surface_dim)
        surface_nu = F.relu(self.nu_surface_1(surface))
        surface_nu = F.relu(self.nu_surface_2(surface_nu))
        surface_nu = surface_nu.view(b, t, -1)
        x = x.add(surface_nu)
        x = F.relu(self.nu_surface_f1(x))
        # Attention-weighted temporal pooling.
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_N1(nn.Module):
    """ResNet-50 temporal-attention model; surface features are clamped to
    non-negative values and scaled by sqrt(2) before joining the attention
    input.
    """

    def __init__(self, num_classes, loss=None, **kwargs):
        super(ResNet50TA_SURFACE_N1, self).__init__()
        # None sentinel avoids a mutable default argument.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: 'softmax' or 'sigmoid'
        self.feat_dim = 2048      # ResNet-50 final feature dimension
        self.middle_dim = 256     # attention middle layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # A [3, 5] kernel corresponds to a 75x150 input image.
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [3, 5])
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim, 1, 3, padding=1)

    def forward(self, x, surface):
        """x is (b, t, C, H, W); surface is reshaped to (b, t, 18)."""
        b = x.size(0)
        t = x.size(1)
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        surface = surface.view(b, t, self.surface_dim)
        # clamp(min=0) keeps only positive values — equivalent to the
        # original per-call nn.Threshold(0, 0), without rebuilding a module
        # on every forward pass. Then scale by 2^0.5.
        surface = surface.clamp(min=0)
        surface = torch.mul(surface, pow(2, 0.5))
        a = torch.cat((a, surface), 2)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            a = torch.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # Attention-weighted temporal pooling.
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA_SURFACE_N2(nn.Module):
    """ResNet-50 temporal-attention model; the surface vector is expanded to
    twice its size as (positive part, negated positive part) before joining
    the attention input.
    """

    def __init__(self, num_classes, loss=None, **kwargs):
        super(ResNet50TA_SURFACE_N2, self).__init__()
        # None sentinel avoids a mutable default argument.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: 'softmax' or 'sigmoid'
        self.feat_dim = 2048      # ResNet-50 final feature dimension
        self.middle_dim = 256     # attention middle layer dimension
        self.surface_dim = 18     # surface feature dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # A [3, 5] kernel corresponds to a 75x150 input image.
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [3, 5])
        # *2: the surface vector is doubled (positive + negated halves).
        self.attention_tconv = nn.Conv1d(self.middle_dim + self.surface_dim * 2, 1, 3, padding=1)

    def forward(self, x, surface):
        """x is (b, t, C, H, W); surface is reshaped to (b, t, 18)."""
        b = x.size(0)
        t = x.size(1)
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        surface = surface.view(b, t, self.surface_dim)
        # Split signed values into two non-negative channels: clamp(min=0)
        # replaces the original per-call nn.Threshold(0, 0).
        surface_n = torch.mul(surface, -1)
        surface_c = torch.cat((surface, surface_n), 2).clamp(min=0)
        a = torch.cat((a, surface_c), 2)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            a = torch.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # Attention-weighted temporal pooling.
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA(nn.Module):
    """Baseline ResNet-50 temporal-attention model (no surface features)."""

    def __init__(self, num_classes, loss=None, **kwargs):
        super(ResNet50TA, self).__init__()
        # None sentinel avoids a mutable default argument.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.att_gen = 'softmax'  # attention generation: 'softmax' or 'sigmoid'
        self.feat_dim = 2048      # ResNet-50 final feature dimension
        self.middle_dim = 256     # attention middle layer dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # A [7, 7] kernel corresponds to a 224x224 input (7x7 feature map).
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)

    def forward(self, x):
        """x is a clip tensor (b, t, C, H, W); see class docstring."""
        b = x.size(0)
        t = x.size(1)
        # Fold the time axis into the batch so the CNN sees ordinary images.
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            a = torch.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # Attention-weighted temporal pooling.
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50RNN(nn.Module):
    """ResNet-50 frame encoder followed by an LSTM, with temporal average
    pooling of the LSTM outputs as the clip feature.
    """

    def __init__(self, num_classes, loss=None, **kwargs):
        # BUG FIX: the original called super(ResNet50r, self).__init__() —
        # `ResNet50r` is undefined, so constructing this class raised
        # NameError. The class's own name must be used here.
        super(ResNet50RNN, self).__init__()
        # None sentinel avoids a mutable default argument.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        self.hidden_dim = 512   # LSTM hidden size (also the clip feature size)
        self.feat_dim = 2048    # ResNet-50 final feature dimension
        self.classifier = nn.Linear(self.hidden_dim, num_classes)
        self.lstm = nn.LSTM(input_size=self.feat_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True)

    def forward(self, x):
        """x is a clip tensor (b, t, C, H, W)."""
        b = x.size(0)
        t = x.size(1)
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(b, t, -1)
        output, (h_n, c_n) = self.lstm(x)
        # Average the LSTM output over time to get one vector per clip.
        output = output.permute(0, 2, 1)
        f = F.avg_pool1d(output, t)
        f = f.view(b, self.hidden_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
# JR
class myResNet50TA(nn.Module):
    """Temporal-attention model whose ResNet-50 backbone is initialised from
    a locally trained multi-label-classifier checkpoint instead of the
    torchvision ImageNet weights.
    """

    def __init__(self, num_classes, loss=None, **kwargs):
        super(myResNet50TA, self).__init__()
        # None sentinel avoids a mutable default argument.
        self.loss = {'xent'} if loss is None else loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        # NOTE(review): hard-coded absolute checkpoint path — this only works
        # on the original author's machine; consider making it a parameter.
        state_dict = torch.load('/media/twhuang/NewVolume1/0219AIC_attentionReID/Video-Person-ReID-master2/epoch_25_snapshot.pth')
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            if 'base' in k:
                # Strip the first 10 characters of the key (wrapper prefix)
                # — presumably something like "module.base."; verify against
                # the checkpoint's actual key names.
                name = k[10:]
                new_state_dict[name] = v
        resnet50.load_state_dict(new_state_dict)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])
        print(self.base)
        self.att_gen = 'softmax'  # attention generation: 'softmax' or 'sigmoid'
        self.feat_dim = 2048      # ResNet-50 final feature dimension
        self.middle_dim = 256     # attention middle layer dimension
        self.classifier = nn.Linear(self.feat_dim, num_classes)
        # A [7, 7] kernel corresponds to a 224x224 input (7x7 feature map).
        self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 7])
        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)

    def forward(self, x):
        """x is a clip tensor (b, t, C, H, W)."""
        b = x.size(0)
        t = x.size(1)
        x = x.view(b * t, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        a = F.relu(self.attention_conv(x))
        a = a.view(b, t, self.middle_dim)
        a = a.permute(0, 2, 1)
        a = F.relu(self.attention_tconv(a))
        a = a.view(b, t)
        x = F.avg_pool2d(x, x.size()[2:])
        if self.att_gen == 'softmax':
            a = F.softmax(a, dim=1)
        elif self.att_gen == 'sigmoid':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            a = torch.sigmoid(a)
            a = F.normalize(a, p=1, dim=1)
        else:
            raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
        x = x.view(b, t, -1)
        # Attention-weighted temporal pooling.
        a = torch.unsqueeze(a, -1)
        a = a.expand(b, t, self.feat_dim)
        att_x = torch.mul(x, a)
        att_x = torch.sum(att_x, 1)
        f = att_x.view(b, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f
        elif self.loss == {'cent'}:
            return y, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
| 40.673527
| 358
| 0.574747
| 6,257
| 43,480
| 3.862074
| 0.036919
| 0.021726
| 0.046886
| 0.030416
| 0.924809
| 0.916243
| 0.912477
| 0.901883
| 0.890006
| 0.879081
| 0
| 0.038333
| 0.281831
| 43,480
| 1,069
| 359
| 40.673527
| 0.735541
| 0.199195
| 0
| 0.900504
| 0
| 0
| 0.052798
| 0.009346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040302
| false
| 0.001259
| 0.008816
| 0
| 0.149874
| 0.001259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39e19e887f2c1880198e960cb1e07ce109ee0727
| 6,410
|
py
|
Python
|
analyzeapp/assets.py
|
topdeveloper424/DataAnalyzer-Flask-
|
94724d77d993617d362a9c0cfc3f6be458089723
|
[
"BSD-2-Clause"
] | 1
|
2019-07-21T20:03:30.000Z
|
2019-07-21T20:03:30.000Z
|
analyzeapp/assets.py
|
topdeveloper424/DataAnalyzer-Flask-
|
94724d77d993617d362a9c0cfc3f6be458089723
|
[
"BSD-2-Clause"
] | 2
|
2019-07-05T14:59:22.000Z
|
2019-07-05T15:26:52.000Z
|
analyzeapp/assets.py
|
harrybenit424/DataAnalyzer
|
cce6aca1050512374938ae74d0fbea6aeb8b948f
|
[
"BSD-2-Clause"
] | null | null | null |
from flask_assets import Bundle
# Flask-Assets bundles. Each Bundle lists source files (relative to the
# static folder), a minifying filter, and the compiled output path.

common_css = Bundle(
    # BUG FIX: 'pages/waves/css/waves.min.css' was listed twice; the
    # duplicate entry has been removed.
    'pages/waves/css/waves.min.css',
    'bower_components/bootstrap/css/bootstrap.min.css',
    'icon/themify-icons/themify-icons.css',
    'icon/font-awesome/css/font-awesome.min.css',
    'css/jquery.mCustomScrollbar.css',
    'pages/chart/radial/css/radial.css',
    'css/style.css',
    filters='cssmin',
    output='public/css/common.css'
)

common_js = Bundle(
    'bower_components/jquery/js/jquery.min.js',
    'bower_components/jquery-ui/js/jquery-ui.min.js',
    'bower_components/popper.js/js/popper.min.js',
    'bower_components/bootstrap/js/bootstrap.min.js',
    'pages/widget/excanvas.js',
    'pages/waves/js/waves.min.js',
    'bower_components/jquery-slimscroll/js/jquery.slimscroll.js',
    'bower_components/modernizr/js/modernizr.js',
    'js/SmoothScroll.js',
    'js/jquery.mCustomScrollbar.concat.min.js',
    'bower_components/chart.js/js/Chart.js',
    filters='jsmin',
    output='public/js/common.js'
)

jschart = Bundle(
    'pages/widget/amchart/gauge.min.js',
    'pages/widget/amchart/serial.min.js',
    'pages/widget/amchart/light.min.js',
    'pages/widget/amchart/pie.min.js',
    filters='jsmin',
    output='public/js/chart.js'
)

jslayout = Bundle(
    'js/pcoded.min.js',
    'js/vertical/vertical-layout.min.js',
    'pages/dashboard/custom-dashboard.js',
    'js/script.js',
    filters='jsmin',
    output='public/js/layout.js'
)

# file page -----------------------------------------------------------------
files_css = Bundle(
    'bower_components/bootstrap/css/bootstrap.min.css',
    'pages/waves/css/waves.min.css',
    'icon/themify-icons/themify-icons.css',
    'icon/icofont/css/icofont.css',
    'icon/font-awesome/css/font-awesome.min.css',
    'css/component.css',
    'bower_components/datatables.net-bs4/css/dataTables.bootstrap4.min.css',
    'pages/data-table/css/buttons.dataTables.min.css',
    'bower_components/datatables.net-responsive-bs4/css/responsive.bootstrap4.min.css',
    'css/style.css',
    'css/jquery.mCustomScrollbar.css',
    filters='cssmin',
    # NOTE(review): CSS output lands under public/js/ — templates reference
    # this path, so it is kept as-is; consider moving to public/css/.
    output='public/js/files.css'
)

files_js = Bundle(
    # BUG FIX: jquery.min.js was listed twice; the duplicate has been removed.
    'bower_components/jquery/js/jquery.min.js',
    'bower_components/popper.js/js/popper.min.js',
    'bower_components/bootstrap/js/bootstrap.min.js',
    'pages/waves/js/waves.min.js',
    'bower_components/jquery-slimscroll/js/jquery.slimscroll.js',
    'bower_components/modernizr/js/modernizr.js',
    'bower_components/modernizr/js/css-scrollbars.js',
    'bower_components/datatables.net/js/jquery.dataTables.min.js',
    'bower_components/datatables.net-buttons/js/dataTables.buttons.min.js',
    'pages/data-table/js/jszip.min.js',
    'pages/data-table/js/pdfmake.min.js',
    'pages/data-table/js/vfs_fonts.js',
    'bower_components/datatables.net-buttons/js/buttons.print.min.js',
    'bower_components/datatables.net-buttons/js/buttons.html5.min.js',
    'bower_components/datatables.net-bs4/js/dataTables.bootstrap4.min.js',
    'bower_components/datatables.net-responsive/js/dataTables.responsive.min.js',
    'bower_components/datatables.net-responsive-bs4/js/responsive.bootstrap4.min.js',
    'pages/data-table/js/data-table-custom.js',
    'js/pcoded.min.js',
    'js/vertical/vertical-layout.min.js',
    'js/jquery.mCustomScrollbar.concat.min.js',
    'js/script.js',
    filters='jsmin',
    output='public/js/files.js'
)

settings_css = Bundle(
    'bower_components/bootstrap/css/bootstrap.min.css',
    'pages/waves/css/waves.min.css',
    'icon/themify-icons/themify-icons.css',
    'icon/icofont/css/icofont.css',
    'icon/font-awesome/css/font-awesome.min.css',
    'css/component.css',
    'css/style.css',
    'css/jquery.mCustomScrollbar.css',
    filters='cssmin',
    output='public/js/settings.css'
)

settings_js = Bundle(
    # BUG FIX: jquery.min.js was listed twice; the duplicate has been removed.
    'bower_components/jquery/js/jquery.min.js',
    'bower_components/popper.js/js/popper.min.js',
    'bower_components/bootstrap/js/bootstrap.min.js',
    'pages/waves/js/waves.min.js',
    'bower_components/jquery-slimscroll/js/jquery.slimscroll.js',
    'bower_components/modernizr/js/modernizr.js',
    'bower_components/modernizr/js/css-scrollbars.js',
    'bower_components/bootstrap-tagsinput/js/bootstrap-tagsinput.js',
    'bower_components/bootstrap-maxlength/js/bootstrap-maxlength.js',
    'js/pcoded.min.js',
    'js/vertical/vertical-layout.min.js',
    'js/jquery.mCustomScrollbar.concat.min.js',
    'js/script.js',
    filters='jsmin',
    # BUG FIX: the original output was 'public/js/files.js', which collided
    # with (and silently overwrote) the files_js bundle output. Any template
    # that loaded the old path must be updated to 'settings.js'.
    output='public/js/settings.js'
)

analyize_css = Bundle(
    'bower_components/bootstrap/css/bootstrap.min.css',
    'pages/waves/css/waves.min.css',
    'icon/themify-icons/themify-icons.css',
    'icon/icofont/css/icofont.css',
    'icon/font-awesome/css/font-awesome.min.css',
    'bower_components/select2/css/select2.min.css',
    'css/component.css',
    'bower_components/datedropper/css/datedropper.min.css',
    'bower_components/datatables.net-bs4/css/dataTables.bootstrap4.min.css',
    'pages/data-table/css/buttons.dataTables.min.css',
    'css/style.css',
    'css/jquery.mCustomScrollbar.css',
    filters='cssmin',
    output='public/js/analyize.css'
)

analyize_js = Bundle(
    'bower_components/jquery/js/jquery.min.js',
    'bower_components/jquery-ui/js/jquery-ui.min.js',
    'bower_components/popper.js/js/popper.min.js',
    'bower_components/bootstrap/js/bootstrap.min.js',
    'pages/widget/excanvas.js',
    'pages/waves/js/waves.min.js',
    'bower_components/jquery-slimscroll/js/jquery.slimscroll.js',
    'bower_components/modernizr/js/modernizr.js',
    'bower_components/modernizr/js/css-scrollbars.js',
    'bower_components/select2/js/select2.full.min.js',
    'bower_components/multiselect/js/jquery.multi-select.js',
    'js/jquery.quicksearch.js',
    'pages/advance-elements/select2-custom.js',
    'pages/advance-elements/moment-with-locales.min.js',
    'bower_components/datedropper/js/datedropper.min.js',
    'bower_components/datatables.net/js/jquery.dataTables.min.js',
    'bower_components/datatables.net-bs4/js/dataTables.bootstrap4.min.js',
    'js/pcoded.min.js',
    'js/vertical/vertical-layout.min.js',
    'js/jquery.mCustomScrollbar.concat.min.js',
    'js/script.js',
    filters='jsmin',
    output='public/js/analyize.js'
)
| 37.267442
| 89
| 0.701872
| 857
| 6,410
| 5.17853
| 0.10035
| 0.172375
| 0.145561
| 0.11717
| 0.811627
| 0.784137
| 0.735917
| 0.701217
| 0.677783
| 0.66922
| 0
| 0.003191
| 0.119969
| 6,410
| 172
| 90
| 37.267442
| 0.783549
| 0.013729
| 0
| 0.636943
| 0
| 0
| 0.757672
| 0.694717
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006369
| 0
| 0.006369
| 0.006369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39f339d8c8042c589643013f683e7ed947bd4025
| 61
|
py
|
Python
|
bcwalletx/__init__.py
|
dalijolijo/bcwallet
|
eeb00c3b7a0c4aa12261d1a1e290b289767ec7e1
|
[
"Apache-2.0"
] | null | null | null |
bcwalletx/__init__.py
|
dalijolijo/bcwallet
|
eeb00c3b7a0c4aa12261d1a1e290b289767ec7e1
|
[
"Apache-2.0"
] | null | null | null |
bcwalletx/__init__.py
|
dalijolijo/bcwallet
|
eeb00c3b7a0c4aa12261d1a1e290b289767ec7e1
|
[
"Apache-2.0"
] | null | null | null |
from .bcwalletx import cli
from .bcwalletx import invoke_cli
| 20.333333
| 33
| 0.836066
| 9
| 61
| 5.555556
| 0.555556
| 0.52
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 61
| 2
| 34
| 30.5
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ffc1c927ef32f552bd46a72686f9fd9f2c9b4165
| 4,907
|
py
|
Python
|
payloads/service_devices_mapping.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
payloads/service_devices_mapping.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
payloads/service_devices_mapping.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
# All five services share an identical device configuration. Build the
# entry once and give each service its own copy, so mutating one service's
# entry cannot leak into another's.
_DEVICE_ENTRY_TEMPLATE = {
    "deviceusername": "admin",
    "devicepassword": "admin",
    "requestmethod": "POST",
    "createurl": "http://localhost:8080/api/running/dmz/services",
    "deleteurl": "http://localhost:8080/api/running/dmz/services/interface-tunnel/Headend,Remote0/",
    "devicename": "iso0",
    "nsoipaddress": "127.0.0.1",
    "nsosshport": 2026,
    "nsousername": "admin",
    "nsopassword": "admin",
    # BUG FIX: the original used the bare name `false` (JSON syntax), which
    # raises NameError the moment this module is imported. Python's boolean
    # literal is `False`.
    "islsa": False,
    "device_running_config_cmd": "enable\nshow running-config",
    "ncs_running_config_cmd": "switch cli\nshow running-config devices device {0}",
    "device_address_cmd": "switch cli\nshow running-config devices device {0} address",
    "device_port_cmd": "switch cli\nshow running-config devices device {0} port",
    "issetup": False,
    "setupcreateurl": "http://localhost:8080/api/running/dmz/services",
    "setupdeleteurl": "http://localhost:8080/api/running/dmz/services/interface-tunnel/Headend,Remote0/",
    "setuprequestmethod": "POST",
}

# Key order (1, 3, 5, 2, 4) preserves the original file's insertion order.
devices_per_service_mapping = {
    "{0}-test".format(i): [dict(_DEVICE_ENTRY_TEMPLATE)]
    for i in (1, 3, 5, 2, 4)
}
| 41.940171
| 104
| 0.694314
| 559
| 4,907
| 6
| 0.094812
| 0.116279
| 0.101372
| 0.119261
| 0.991354
| 0.991354
| 0.991354
| 0.991354
| 0.991354
| 0.991354
| 0
| 0.038515
| 0.126961
| 4,907
| 117
| 105
| 41.940171
| 0.744398
| 0
| 0
| 0.846154
| 0
| 0.08547
| 0.753871
| 0.047881
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.08547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
ffc2170a49598682f9b92ef5cfd2b1248d12bd7d
| 3,469
|
py
|
Python
|
src/training/dataloader.py
|
manhhabui/personalized-breath
|
e8cbb2cd68dbfe78662701922826996b8e42f75e
|
[
"Apache-2.0"
] | 2
|
2020-04-09T15:25:07.000Z
|
2020-04-16T08:28:10.000Z
|
src/training/dataloader.py
|
hamanhbui/personalized-breath
|
1627476ce94b81d1daa1da3f71f80b7274031afd
|
[
"Apache-2.0"
] | null | null | null |
src/training/dataloader.py
|
hamanhbui/personalized-breath
|
1627476ce94b81d1daa1da3f71f80b7274031afd
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from torch.utils.data import Dataset
class Audio_Dataset(Dataset):
    """Breath-audio dataset.

    Each item is ``(type_name, audio_features, subject_label)`` where the
    features are loaded from a pre-extracted ``.npy`` file and the subject
    label is parsed from the file name.
    """

    def __init__(self, root_dir, filenamelist, old_new_name_map, object_id=None):
        self.filenamelist = filenamelist
        self.root_dir = root_dir
        # Maps original subject names to label ids; when empty, the label is
        # derived directly from the numeric file-name prefix (1-based -> 0-based).
        self.old_new_name_map = old_new_name_map
        self.type_name = "Audio"
        # When set, the task becomes binary: 0 = target subject, 1 = other.
        self.object_id = object_id

    def __len__(self):
        return len(self.filenamelist)

    def data_extracted_loader(self, file_name):
        # Audio features live in a sibling "processed/audio" directory.
        audio_file_name = file_name.replace("processed", "processed/audio")
        return np.load(audio_file_name + ".npy")

    def __getitem__(self, index):
        # File names look like "<a>/<b>/<subject>_<...>": the subject prefix
        # is the third path component — TODO confirm against the data layout.
        raw_name = self.filenamelist[index].split("/")[2].split("_")[0]
        if not self.old_new_name_map:
            subject_name = int(raw_name) - 1
        else:
            subject_name = self.old_new_name_map[raw_name]
        # `is not None` (identity) instead of the original `!= None`.
        if self.object_id is not None:
            subject_name = 0 if subject_name == self.object_id else 1
        return (
            self.type_name,
            self.data_extracted_loader(self.root_dir + self.filenamelist[index]),
            subject_name,
        )
class Acce_Gyro_Dataset(Dataset):
    """Accelerometer/gyroscope breath dataset.

    Each item is ``(type_name, sensor_features, subject_label)``; features
    are loaded from a pre-extracted ``.npy`` file.
    """

    def __init__(self, root_dir, filenamelist, old_new_name_map, object_id=None):
        self.filenamelist = filenamelist
        self.root_dir = root_dir
        # Maps original subject names to label ids; when empty, the label is
        # derived directly from the numeric file-name prefix (1-based -> 0-based).
        self.old_new_name_map = old_new_name_map
        self.type_name = "Acce_Gyro"
        # When set, the task becomes binary: 0 = target subject, 1 = other.
        self.object_id = object_id

    def __len__(self):
        return len(self.filenamelist)

    def data_extracted_loader(self, file_name):
        # Sensor features live in a sibling "processed/acce-gyro" directory.
        sensor_file_name = file_name.replace("processed", "processed/acce-gyro")
        return np.load(sensor_file_name + ".npy")

    def __getitem__(self, index):
        # File names look like "<a>/<b>/<subject>_<...>": the subject prefix
        # is the third path component — TODO confirm against the data layout.
        raw_name = self.filenamelist[index].split("/")[2].split("_")[0]
        if not self.old_new_name_map:
            subject_name = int(raw_name) - 1
        else:
            subject_name = self.old_new_name_map[raw_name]
        # `is not None` (identity) instead of the original `!= None`.
        if self.object_id is not None:
            subject_name = 0 if subject_name == self.object_id else 1
        return (
            self.type_name,
            self.data_extracted_loader(self.root_dir + self.filenamelist[index]),
            subject_name,
        )
class Multimodality_Dataset(Dataset):
    """Combined audio + accelerometer/gyroscope breath dataset.

    Each item is ``(type_name, (sensor_features, audio_features),
    subject_label)``; both feature arrays are loaded from pre-extracted
    ``.npy`` files.
    """

    def __init__(self, root_dir, filenamelist, old_new_name_map, object_id=None):
        self.filenamelist = filenamelist
        self.root_dir = root_dir
        # Maps original subject names to label ids; when empty, the label is
        # derived directly from the numeric file-name prefix (1-based -> 0-based).
        self.old_new_name_map = old_new_name_map
        # When set, the task becomes binary: 0 = target subject, 1 = other.
        self.object_id = object_id
        self.type_name = "Multimodality"

    def __len__(self):
        return len(self.filenamelist)

    def data_extracted_loader(self, file_name):
        # Audio and sensor features live in sibling "processed/*" directories;
        # the return order (sensor, audio) matches the original contract.
        audio_file_name = file_name.replace("processed", "processed/audio")
        sensor_file_name = file_name.replace("processed", "processed/acce-gyro")
        return np.load(sensor_file_name + ".npy"), np.load(audio_file_name + ".npy")

    def __getitem__(self, index):
        # File names look like "<a>/<b>/<subject>_<...>": the subject prefix
        # is the third path component — TODO confirm against the data layout.
        raw_name = self.filenamelist[index].split("/")[2].split("_")[0]
        if not self.old_new_name_map:
            subject_name = int(raw_name) - 1
        else:
            subject_name = self.old_new_name_map[raw_name]
        # `is not None` (identity) instead of the original `!= None`.
        if self.object_id is not None:
            subject_name = 0 if subject_name == self.object_id else 1
        return (
            self.type_name,
            self.data_extracted_loader(self.root_dir + self.filenamelist[index]),
            subject_name,
        )
| 38.544444
| 111
| 0.656962
| 464
| 3,469
| 4.528017
| 0.103448
| 0.094241
| 0.071395
| 0.092813
| 0.942408
| 0.934793
| 0.934793
| 0.934793
| 0.934793
| 0.934793
| 0
| 0.008949
| 0.226867
| 3,469
| 90
| 112
| 38.544444
| 0.774422
| 0
| 0
| 0.847222
| 0
| 0
| 0.045821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.027778
| 0.041667
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0814d70812281c7e9eaab0da815bcd04328b2bba
| 48,612
|
py
|
Python
|
openapi_client/api/default_api.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
openapi_client/api/default_api.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
openapi_client/api/default_api.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
JDX reference application API
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
The version of the OpenAPI document: 0.0.17
Contact: engineering@brighthive.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def framework_recommendations_post(self, **kwargs): # noqa: E501
"""Get framework recommendations based on the uploaded job descripton and context. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_recommendations_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Get framework-recommendations for a given Pipeline ID.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: FrameworkRecommendationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.framework_recommendations_post_with_http_info(**kwargs) # noqa: E501
def framework_recommendations_post_with_http_info(self, **kwargs):  # noqa: E501
    """Get framework recommendations based on the uploaded job description and context.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param Request request: Get framework-recommendations for a given Pipeline ID.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(FrameworkRecommendationResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['request', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method framework_recommendations_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `request` argument becomes the JSON body.
    return self.api_client.call_api(
        '/framework-recommendations', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='FrameworkRecommendationResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def framework_selections_post(self, **kwargs): # noqa: E501
"""The user indicates what frameworks they selected # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.framework_selections_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param FrameworkSelectionRequest framework_selection_request: framework selections
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Response
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.framework_selections_post_with_http_info(**kwargs) # noqa: E501
def framework_selections_post_with_http_info(self, **kwargs):  # noqa: E501
    """The user indicates what frameworks they selected  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param FrameworkSelectionRequest framework_selection_request: framework selections
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(Response, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['framework_selection_request', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method framework_selections_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `framework_selection_request` argument becomes the body.
    return self.api_client.call_api(
        '/framework-selections', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('framework_selection_request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='Response',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def generate_job_schema_plus_post(self, **kwargs): # noqa: E501
"""Generate JobSchema+ # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.generate_job_schema_plus_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Generate JobSchema+ from a given pipeline_id
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GenerateJobSchemaPlusResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.generate_job_schema_plus_post_with_http_info(**kwargs) # noqa: E501
def generate_job_schema_plus_post_with_http_info(self, **kwargs):  # noqa: E501
    """Generate JobSchema+  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param Request request: Generate JobSchema+ from a given pipeline_id
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(GenerateJobSchemaPlusResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['request', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method generate_job_schema_plus_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `request` argument becomes the JSON body.
    return self.api_client.call_api(
        '/generate-job-schema-plus', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='GenerateJobSchemaPlusResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_score_post(self, **kwargs): # noqa: E501
"""Provides a scored based on how much metadata you provide and the quality of that data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_score_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Get score for a given Pipeline ID.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PipelineScoreResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_score_post_with_http_info(**kwargs) # noqa: E501
def get_score_post_with_http_info(self, **kwargs):  # noqa: E501
    """Provides a scored based on how much metadata you provide and the quality of that data.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param Request request: Get score for a given Pipeline ID.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(PipelineScoreResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['request', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_score_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `request` argument becomes the JSON body.
    return self.api_client.call_api(
        '/get-score', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='PipelineScoreResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def health_get(self, **kwargs): # noqa: E501
"""Health Check # noqa: E501
The health check endpoint can be used to check if the API is up. If the API is running it will return a 200 OK response. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.health_get(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: HealthResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.health_get_with_http_info(**kwargs) # noqa: E501
def health_get_with_http_info(self, **kwargs):  # noqa: E501
    """Health Check  # noqa: E501

    Checks whether the API is up; a running API answers 200 OK.
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(HealthResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # This endpoint takes no parameters of its own; only the common
    # request options are accepted.
    accepted = ['async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method health_get" % key
            )

    # GET request: Accept header only, no body, no Content-Type.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/health', 'GET',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='HealthResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def match_table_post(self, **kwargs): # noqa: E501
"""Get the match table associated with the provided `pipelineID` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.match_table_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param MatchTableRequest match_table_request: Get framework-recommendations for a given Pipeline ID.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: MatchTableResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.match_table_post_with_http_info(**kwargs) # noqa: E501
def match_table_post_with_http_info(self, **kwargs):  # noqa: E501
    """Get the match table associated with the provided `pipelineID`  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param MatchTableRequest match_table_request: Get framework-recommendations for a given Pipeline ID.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(MatchTableResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['match_table_request', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method match_table_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `match_table_request` argument becomes the JSON body.
    return self.api_client.call_api(
        '/match-table', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('match_table_request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='MatchTableResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def preview_post(self, **kwargs): # noqa: E501
"""Get preview of job description with tagged matches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.preview_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Request request: Get preview of job description wth tagged matches.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PreviewResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.preview_post_with_http_info(**kwargs) # noqa: E501
def preview_post_with_http_info(self, **kwargs):  # noqa: E501
    """Get preview of job description with tagged matches.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param Request request: Get preview of job description wth tagged matches.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(PreviewResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['request', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method preview_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `request` argument becomes the JSON body.
    return self.api_client.call_api(
        '/preview', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='PreviewResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def upload_job_description_context_post(self, **kwargs): # noqa: E501
"""Provide job description context (e.g metadata) on the job description # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_job_description_context_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param JobDescriptionContextRequest job_description_context_request: job description context
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: JobDescriptionContextResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.upload_job_description_context_post_with_http_info(**kwargs) # noqa: E501
def upload_job_description_context_post_with_http_info(self, **kwargs):  # noqa: E501
    """Provide job description context (e.g metadata) on the job description  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param JobDescriptionContextRequest job_description_context_request: job description context
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(JobDescriptionContextResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['job_description_context_request', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_job_description_context_post" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # The optional `job_description_context_request` argument becomes the body.
    return self.api_client.call_api(
        '/upload-job-description-context', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=kwargs.get('job_description_context_request'),
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='JobDescriptionContextResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def upload_job_description_file_post(self, **kwargs): # noqa: E501
"""Upload a raw job description file. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_job_description_file_post(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param file file: The file to upload
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: RawJobDescriptionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.upload_job_description_file_post_with_http_info(**kwargs) # noqa: E501
def upload_job_description_file_post_with_http_info(self, **kwargs):  # noqa: E501
    """Upload a raw job description file.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response.

    :param async_req bool: execute request asynchronously
    :param file file: The file to upload
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
        (connection, read) timeout tuple.
    :return: tuple(RawJobDescriptionResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword that is neither the endpoint parameter nor a
    # common request option.
    accepted = ['file', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_job_description_file_post" % key
            )

    # The upload goes out as multipart/form-data; the optional `file`
    # argument is sent as a file part, not as a JSON body.
    files = {}
    if 'file' in kwargs:
        files['file'] = kwargs['file']  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/upload-job-description-file', 'POST',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],  # no plain form fields
        files=files,
        response_type='RawJobDescriptionResponse',  # noqa: E501
        auth_settings=[],  # no authentication configured
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def user_actions_post(self, **kwargs):  # noqa: E501
    """Provide the user responses as a list of user actions.

    Synchronous by default; pass async_req=True for an asynchronous
    request (the call then returns the request thread).

    >>> thread = api.user_actions_post(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param UserActionRequest user_action_request: Contains a list of user responses
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout, or a
        (connection, read) tuple of timeouts.
    :return: Response, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    result = self.user_actions_post_with_http_info(**kwargs)  # noqa: E501
    return result
def user_actions_post_with_http_info(self, **kwargs):  # noqa: E501
    """Provide the user responses as a list of user actions (full HTTP info).

    Synchronous by default; pass async_req=True for an asynchronous
    request (the call then returns the request thread).

    >>> thread = api.user_actions_post_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param UserActionRequest user_action_request: Contains a list of user responses
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout, or a
        (connection, read) tuple of timeouts.
    :return: tuple(Response, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    allowed = ['user_action_request', 'async_req',  # noqa: E501
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method user_actions_post" % name
            )
        params[name] = value

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/user-actions', 'POST',
        {},  # no path parameters
        [],  # no query parameters
        header_params,
        body=params.get('user_action_request'),
        post_params=[],
        files={},
        response_type='Response',  # noqa: E501
        auth_settings=[],  # endpoint requires no authentication
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 44.475755
| 317
| 0.608697
| 5,374
| 48,612
| 5.247674
| 0.045776
| 0.034609
| 0.048651
| 0.031914
| 0.936811
| 0.932768
| 0.925676
| 0.924223
| 0.913833
| 0.908017
| 0
| 0.012175
| 0.320764
| 48,612
| 1,092
| 318
| 44.516484
| 0.841909
| 0.484983
| 0
| 0.795407
| 0
| 0
| 0.16473
| 0.057228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043841
| false
| 0
| 0.010438
| 0
| 0.098121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f273d805dc542d49b7657c334d5add4cb1cfda75
| 148,695
|
py
|
Python
|
octopus_deploy_swagger_client/octopus_deploy_client/tenants_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/octopus_deploy_client/tenants_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/octopus_deploy_client/tenants_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from octopus_deploy_swagger_client.api_client import ApiClient
class TenantsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_response_descriptor_tenants_tenant_tenant_resource(self, **kwargs):  # noqa: E501
    """Create a TenantResource.

    Creates a new tenant. Synchronous by default; pass async_req=True
    for an asynchronous request (the call then returns the request thread).

    >>> thread = api.create_response_descriptor_tenants_tenant_tenant_resource(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.create_response_descriptor_tenants_tenant_tenant_resource_with_http_info(**kwargs)  # noqa: E501
def create_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, **kwargs):  # noqa: E501
    """Create a TenantResource (full HTTP info variant).

    Creates a new tenant. Synchronous by default; pass async_req=True
    for an asynchronous request (the call then returns the request thread).

    >>> thread = api.create_response_descriptor_tenants_tenant_tenant_resource_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource, or the request thread when called asynchronously.
    """
    allowed = ['tenant_resource', 'async_req',  # noqa: E501
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_response_descriptor_tenants_tenant_tenant_resource" % name
            )
        params[name] = value

    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(
        ['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/tenants', 'POST',
        {},  # no path parameters
        [],  # no query parameters
        header_params,
        body=params.get('tenant_resource'),
        post_params=[],
        files={},
        response_type='TenantResource',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, **kwargs):  # noqa: E501
    """Create a TenantResource within a space.

    Creates a new tenant. Synchronous by default; pass async_req=True
    for an asynchronous request (the call then returns the request thread).

    >>> thread = api.create_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.create_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, **kwargs)  # noqa: E501
def create_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, **kwargs):  # noqa: E501
    """Create a TenantResource within a space (full HTTP info variant).

    Creates a new tenant. Synchronous by default; pass async_req=True
    for an asynchronous request (the call then returns the request thread).

    >>> thread = api.create_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource, or the request thread when called asynchronously.
    """
    allowed = ['base_space_id', 'tenant_resource', 'async_req',  # noqa: E501
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'base_space_id': base_space_id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_response_descriptor_tenants_tenant_tenant_resource_spaces" % name
            )
        params[name] = value

    # verify the required parameter 'base_space_id' is set
    if params['base_space_id'] is None:
        raise ValueError("Missing the required parameter `base_space_id` when calling `create_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501

    path_params = {'baseSpaceId': params['base_space_id']}  # noqa: E501

    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(
        ['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=params.get('tenant_resource'),
        post_params=[],
        files={},
        response_type='TenantResource',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder(self, id, **kwargs):  # noqa: E501
    """Get the logo associated with a tenant.

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: file, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_with_http_info(id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get the logo associated with a tenant (full HTTP info variant).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: file, or the request thread when called asynchronously.
    """
    allowed = ['id', 'async_req', '_return_http_data_only',  # noqa: E501
               '_preload_content', '_request_timeout']
    params = {'id': id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder" % name
            )
        params[name] = value

    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder`")  # noqa: E501

    path_params = {'id': params['id']}  # noqa: E501

    # HTTP header `Accept` — the endpoint returns image data
    header_params = {'Accept': self.api_client.select_header_accept(
        ['image/png'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/tenants/{id}/logo', 'GET',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='file',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """Get the logo associated with a tenant within a space.

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: file, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """Get the logo associated with a tenant within a space (full HTTP info).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: file, or the request thread when called asynchronously.
    """
    allowed = ['base_space_id', 'id', 'async_req',  # noqa: E501
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'base_space_id': base_space_id, 'id': id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces" % name
            )
        params[name] = value

    # verify the required parameter 'base_space_id' is set
    if params['base_space_id'] is None:
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_get_responder_spaces`")  # noqa: E501

    path_params = {
        'baseSpaceId': params['base_space_id'],  # noqa: E501
        'id': params['id'],  # noqa: E501
    }

    # HTTP header `Accept` — the endpoint returns image data
    header_params = {'Accept': self.api_client.select_header_accept(
        ['image/png'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/logo', 'GET',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='file',  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder(self, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant.

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_with_http_info(id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_with_http_info(self, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant (full HTTP info variant).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    allowed = ['id', 'async_req', '_return_http_data_only',  # noqa: E501
               '_preload_content', '_request_timeout']
    params = {'id': id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder" % name
            )
        params[name] = value

    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder`")  # noqa: E501

    path_params = {'id': params['id']}  # noqa: E501

    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(
        ['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/tenants/{id}/logo', 'PUT',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0(self, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant (POST variant).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0_with_http_info(id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0_with_http_info(self, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant (POST variant, full HTTP info).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    allowed = ['id', 'async_req', '_return_http_data_only',  # noqa: E501
               '_preload_content', '_request_timeout']
    params = {'id': id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0" % name
            )
        params[name] = value

    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_0`")  # noqa: E501

    path_params = {'id': params['id']}  # noqa: E501

    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(
        ['application/json'])}  # noqa: E501

    # Note: this generated "_0" duplicate issues a POST, unlike the PUT twin.
    return self.api_client.call_api(
        '/api/tenants/{id}/logo', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant within a space.

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience variant always unwraps the response to just the data.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths forward straight to the
    # *_with_http_info variant and return its result unchanged.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """Update the logo associated with a tenant within a space (full HTTP info).

    NOTE: This definition is not complete. We will be adding more detail
    in future releases of Octopus. Synchronous by default; pass
    async_req=True for an asynchronous request.

    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: None, or the request thread when called asynchronously.
    """
    allowed = ['base_space_id', 'id', 'async_req',  # noqa: E501
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'base_space_id': base_space_id, 'id': id}
    # Reject any keyword argument this endpoint does not understand.
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces" % name
            )
        params[name] = value

    # verify the required parameter 'base_space_id' is set
    if params['base_space_id'] is None:
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces`")  # noqa: E501

    path_params = {
        'baseSpaceId': params['base_space_id'],  # noqa: E501
        'id': params['id'],  # noqa: E501
    }

    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(
        ['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/logo', 'PUT',
        path_params,
        [],  # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0(self, base_space_id, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0 # noqa: E501
Updates the logo associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0 # noqa: E501
Updates the logo associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0_with_http_info(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_logo_put_responder_spaces_0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/tenants/{id}/logo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action # noqa: E501
Checks tenants for matching tags rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_with_http_info(**kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_with_http_info(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action # noqa: E501
Checks tenants for matching tags rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/tenants/tag-test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces # noqa: E501
Checks tenants for matching tags rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces # noqa: E501
Checks tenants for matching tags rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_tag_test_action_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/tenants/tag-test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action # noqa: E501
Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_with_http_info(**kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_with_http_info(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action # noqa: E501
Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/tenants/variableset-test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces # noqa: E501
Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces # noqa: E501
Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/tenants/variableset-test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder(self, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(self, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/tenants/{id}/variables', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TenantVariableResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces(self, base_space_id, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/tenants/{id}/variables', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TenantVariableResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action # noqa: E501
Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(**kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action # noqa: E501
Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/tenants/variables-missing', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces # noqa: E501
Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces_with_http_info(self, base_space_id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces  # noqa: E501
    Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces_with_http_info(base_space_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameter first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['base_space_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, base_space_id, kwargs,
    # all_params); validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_spaces`")  # noqa: E501
    collection_formats = {}
    # Map the snake_case parameter onto the camelCase URL template token.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # response_type=None: the endpoint returns no deserializable body.
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/variables-missing', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder(self, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder  # noqa: E501

    Updates the variables associated with the tenant. NOTE: This definition
    is not complete. We will be adding more detail in future releases of
    Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_with_http_info(id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_with_http_info(self, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder  # noqa: E501
    Updates the variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_with_http_info(id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameter first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, id, kwargs, all_params);
    # validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The response body is deserialized into a TenantVariableResource.
    return self.api_client.call_api(
        '/api/tenants/{id}/variables', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0(self, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0  # noqa: E501

    Updates the variables associated with the tenant. NOTE: This definition
    is not complete. We will be adding more detail in future releases of
    Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0_with_http_info(id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0_with_http_info(self, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0  # noqa: E501
    Updates the variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0_with_http_info(id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameter first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, id, kwargs, all_params);
    # validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_0`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # Same route as the PUT variant above, but issued as a POST; the
    # response body is deserialized into a TenantVariableResource.
    return self.api_client.call_api(
        '/api/tenants/{id}/variables', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces  # noqa: E501

    Updates the variables associated with the tenant, scoped to a space.
    NOTE: This definition is not complete. We will be adding more detail in
    future releases of Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces  # noqa: E501
    Updates the variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameters first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, base_space_id, id, kwargs,
    # all_params); validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces`")  # noqa: E501
    collection_formats = {}
    # Map the snake_case parameters onto the URL template tokens.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The response body is deserialized into a TenantVariableResource.
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/variables', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0  # noqa: E501

    Updates the variables associated with the tenant, scoped to a space.
    NOTE: This definition is not complete. We will be adding more detail in
    future releases of Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0  # noqa: E501
    Updates the variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameters first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, base_space_id, id, kwargs,
    # all_params); validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0`")  # noqa: E501
    collection_formats = {}
    # Map the snake_case parameters onto the URL template tokens.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # Same route as the PUT variant above, but issued as a POST; the
    # response body is deserialized into a TenantVariableResource.
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/variables', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action(self, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action  # noqa: E501

    Reports back the status of multi-tenancy. NOTE: This definition is not
    complete. We will be adding more detail in future releases of Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: MultiTenancyStatusResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(**kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(self, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action  # noqa: E501
    Reports back the status of multi-tenancy NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :return: MultiTenancyStatusResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # No positional parameters for this endpoint; only the request-control
    # keywords common to all generated endpoints are accepted.
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, kwargs, all_params);
    # validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The response body is deserialized into a MultiTenancyStatusResource.
    return self.api_client.call_api(
        '/api/tenants/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MultiTenancyStatusResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces(self, base_space_id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces  # noqa: E501

    Reports back the status of multi-tenancy, scoped to a space. NOTE: This
    definition is not complete. We will be adding more detail in future
    releases of Octopus.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: MultiTenancyStatusResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(base_space_id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(self, base_space_id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces  # noqa: E501
    Reports back the status of multi-tenancy NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(base_space_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: MultiTenancyStatusResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameter first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['base_space_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, base_space_id, kwargs,
    # all_params); validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces`")  # noqa: E501
    collection_formats = {}
    # Map the snake_case parameter onto the camelCase URL template token.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The response body is deserialized into a MultiTenancyStatusResource.
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MultiTenancyStatusResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_on_background_response_descriptor_tenants_tenant_tenant_resource(self, id, **kwargs):  # noqa: E501
    """Delete a TenantResource by ID  # noqa: E501

    Deletes an existing tenant.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_on_background_response_descriptor_tenants_tenant_tenant_resource(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the TenantResource to delete (required)
    :return: TaskResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.delete_on_background_response_descriptor_tenants_tenant_tenant_resource_with_http_info(id, **kwargs)  # noqa: E501
def delete_on_background_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, id, **kwargs):  # noqa: E501
    """Delete a TenantResource by ID  # noqa: E501
    Deletes an existing tenant.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_on_background_response_descriptor_tenants_tenant_tenant_resource_with_http_info(id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str id: ID of the TenantResource to delete (required)
    :return: TaskResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names: the positional parameter first, then the
    # request-control keywords common to all generated endpoints.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of every local bound so far (self, id, kwargs, all_params);
    # validated kwargs are merged into this dict below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_on_background_response_descriptor_tenants_tenant_tenant_resource" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_on_background_response_descriptor_tenants_tenant_tenant_resource`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The DELETE returns a TaskResource describing the background deletion.
    return self.api_client.call_api(
        '/api/tenants/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TaskResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """Delete a TenantResource by ID  # noqa: E501

    Deletes an existing tenant, scoped to a space.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to delete (required)
    :return: TaskResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always returns just the response payload,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether async (a thread handle) or sync (the payload), the delegate's
    # return value is exactly what the caller should receive.
    return self.delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """Delete a TenantResource by ID.  # noqa: E501

    Deletes an existing tenant.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to delete (required)
    :return: TaskResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the explicit arguments together with `kwargs`;
    # the loop below folds validated kwargs into the same dict so all
    # options can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_on_background_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TaskResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def index_response_descriptor_tenants_tenant_tenant_resource(self, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The
    results will be sorted alphabetically by name, and returned 30 at a
    time. Synchronous by default; pass async_req=True to receive the
    request thread instead of the deserialized response.

    >>> thread = api.index_response_descriptor_tenants_tenant_tenant_resource(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int skip: Number of items to skip
    :param int take: Number of items to take
    :return: ResourceCollectionTenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.index_response_descriptor_tenants_tenant_tenant_resource_with_http_info
    return delegate(**kwargs)  # noqa: E501
def index_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name, and returned 30 at a time.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.index_response_descriptor_tenants_tenant_tenant_resource_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int skip: Number of items to skip
    :param int take: Number of items to take
    :return: ResourceCollectionTenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['skip', 'take']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method index_response_descriptor_tenants_tenant_tenant_resource" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Optional paging parameters are only sent when supplied.
    query_params = []
    if 'skip' in params:
        query_params.append(('skip', params['skip']))  # noqa: E501
    if 'take' in params:
        query_params.append(('take', params['take']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/tenants', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResourceCollectionTenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def index_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The
    results will be sorted alphabetically by name, and returned 30 at a
    time. Synchronous by default; pass async_req=True to receive the
    request thread instead of the deserialized response.

    >>> thread = api.index_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param int skip: Number of items to skip
    :param int take: Number of items to take
    :return: ResourceCollectionTenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.index_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info
    return delegate(base_space_id, **kwargs)  # noqa: E501
def index_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name, and returned 30 at a time.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.index_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param int skip: Number of items to skip
    :param int take: Number of items to take
    :return: ResourceCollectionTenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['base_space_id', 'skip', 'take']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method index_response_descriptor_tenants_tenant_tenant_resource_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `index_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    # Optional paging parameters are only sent when supplied.
    query_params = []
    if 'skip' in params:
        query_params.append(('skip', params['skip']))  # noqa: E501
    if 'take' in params:
        query_params.append(('take', params['take']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResourceCollectionTenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_all_response_descriptor_tenants_tenant_tenant_resource(self, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The
    results will be sorted alphabetically by name. Synchronous by
    default; pass async_req=True to receive the request thread instead
    of the deserialized response.

    >>> thread = api.list_all_response_descriptor_tenants_tenant_tenant_resource(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: list[TenantResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.list_all_response_descriptor_tenants_tenant_tenant_resource_with_http_info
    return delegate(**kwargs)  # noqa: E501
def list_all_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_all_response_descriptor_tenants_tenant_tenant_resource_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: list[TenantResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no endpoint-specific arguments; only the
    # generic transport options are accepted.
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots `kwargs`; the loop below folds validated
    # kwargs into the same dict so all options can be read uniformly
    # via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_all_response_descriptor_tenants_tenant_tenant_resource" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/tenants/all', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[TenantResource]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_all_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The
    results will be sorted alphabetically by name. Synchronous by
    default; pass async_req=True to receive the request thread instead
    of the deserialized response.

    >>> thread = api.list_all_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: list[TenantResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.list_all_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info
    return delegate(base_space_id, **kwargs)  # noqa: E501
def list_all_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, **kwargs):  # noqa: E501
    """Get a list of TenantResources.  # noqa: E501

    Lists all of the tenants in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_all_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: list[TenantResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['base_space_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_all_response_descriptor_tenants_tenant_tenant_resource_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `list_all_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/all', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[TenantResource]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def load_response_descriptor_tenants_tenant_tenant_resource(self, id, **kwargs):  # noqa: E501
    """Get a TenantResource by ID.  # noqa: E501

    Gets a single tenant by ID. Synchronous by default; pass
    async_req=True to receive the request thread instead of the
    deserialized response.

    >>> thread = api.load_response_descriptor_tenants_tenant_tenant_resource(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the TenantResource to load (required)
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.load_response_descriptor_tenants_tenant_tenant_resource_with_http_info
    return delegate(id, **kwargs)  # noqa: E501
def load_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get a TenantResource by ID.  # noqa: E501

    Gets a single tenant by ID.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.load_response_descriptor_tenants_tenant_tenant_resource_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the TenantResource to load (required)
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method load_response_descriptor_tenants_tenant_tenant_resource" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `load_response_descriptor_tenants_tenant_tenant_resource`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/tenants/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def load_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """Get a TenantResource by ID.  # noqa: E501

    Gets a single tenant by ID within the given space. Synchronous by
    default; pass async_req=True to receive the request thread instead
    of the deserialized response.

    >>> thread = api.load_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to load (required)
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.load_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info
    return delegate(base_space_id, id, **kwargs)  # noqa: E501
def load_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """Get a TenantResource by ID.  # noqa: E501

    Gets a single tenant by ID.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.load_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to load (required)
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method load_response_descriptor_tenants_tenant_tenant_resource_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `load_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `load_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def modify_response_descriptor_tenants_tenant_tenant_resource(self, id, **kwargs):  # noqa: E501
    """Modify a TenantResource by ID.  # noqa: E501

    Modifies an existing tenant. Synchronous by default; pass
    async_req=True to receive the request thread instead of the
    deserialized response.

    >>> thread = api.modify_response_descriptor_tenants_tenant_tenant_resource(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the TenantResource to modify (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.modify_response_descriptor_tenants_tenant_tenant_resource_with_http_info
    return delegate(id, **kwargs)  # noqa: E501
def modify_response_descriptor_tenants_tenant_tenant_resource_with_http_info(self, id, **kwargs):  # noqa: E501
    """Modify a TenantResource by ID.  # noqa: E501

    Modifies an existing tenant.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.modify_response_descriptor_tenants_tenant_tenant_resource_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: ID of the TenantResource to modify (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['id', 'tenant_resource']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method modify_response_descriptor_tenants_tenant_tenant_resource" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `modify_response_descriptor_tenants_tenant_tenant_resource`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional tenant_resource becomes the PUT request body.
    body_params = None
    if 'tenant_resource' in params:
        body_params = params['tenant_resource']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/tenants/{id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def modify_response_descriptor_tenants_tenant_tenant_resource_spaces(self, base_space_id, id, **kwargs):  # noqa: E501
    """Modify a TenantResource by ID.  # noqa: E501

    Modifies an existing tenant in the given space. Synchronous by
    default; pass async_req=True to receive the request thread instead
    of the deserialized response.

    >>> thread = api.modify_response_descriptor_tenants_tenant_tenant_resource_spaces(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to modify (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always surfaces only the payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both reduce to the same delegated call: with
    # async_req set, the delegate already returns the thread.
    delegate = self.modify_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info
    return delegate(base_space_id, id, **kwargs)  # noqa: E501
def modify_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """Modify a TenantResource by ID.  # noqa: E501

    Modifies an existing tenant.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.modify_response_descriptor_tenants_tenant_tenant_resource_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the TenantResource to modify (required)
    :param TenantResource tenant_resource: The TenantResource resource to create
    :return: TenantResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # transport options understood by every generated method.
    all_params = ['base_space_id', 'id', 'tenant_resource']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments together with `kwargs`; the loop
    # below folds validated kwargs into the same dict so all options
    # can be read uniformly via params.get(...) later on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method modify_response_descriptor_tenants_tenant_tenant_resource_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `modify_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `modify_response_descriptor_tenants_tenant_tenant_resource_spaces`")  # noqa: E501
    collection_formats = {}
    # Template variables substituted into the URL path below.
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional tenant_resource becomes the PUT request body.
    body_params = None
    if 'tenant_resource' in params:
        body_params = params['tenant_resource']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501
    # The shared ApiClient performs the HTTP call (and async dispatch
    # when params['async_req'] is set).
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 47.612872
| 212
| 0.667541
| 17,853
| 148,695
| 5.194925
| 0.013275
| 0.03942
| 0.028465
| 0.06534
| 0.994469
| 0.994469
| 0.994469
| 0.99324
| 0.993024
| 0.992884
| 0
| 0.013265
| 0.257285
| 148,695
| 3,122
| 213
| 47.628123
| 0.826527
| 0.350301
| 0
| 0.839317
| 1
| 0
| 0.218049
| 0.098458
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038258
| false
| 0
| 0.002354
| 0
| 0.097705
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4b2b8f70d6bb6436bc9cb976f782e83edcd7cb1c
| 166
|
py
|
Python
|
tests/test_global.py
|
Jakob-Daugherty/ftlengine
|
a655589b560c16c0c3509d2bd9f7c70a0ed321cc
|
[
"MIT"
] | 1
|
2021-06-17T16:03:13.000Z
|
2021-06-17T16:03:13.000Z
|
tests/test_global.py
|
Jakob-Daugherty/ftlengine
|
a655589b560c16c0c3509d2bd9f7c70a0ed321cc
|
[
"MIT"
] | null | null | null |
tests/test_global.py
|
Jakob-Daugherty/ftlengine
|
a655589b560c16c0c3509d2bd9f7c70a0ed321cc
|
[
"MIT"
] | null | null | null |
from src import __version__, __version_info__
def test_version():
    """Sanity-check that the package version string has the expected length (e.g. '0.12.0')."""
    version_length = len(__version__)
    assert version_length == 6
def test_version_info():
    """Sanity-check that the version-info tuple carries exactly three components (major, minor, patch)."""
    component_count = len(__version_info__)
    assert component_count == 3
| 16.6
| 45
| 0.73494
| 22
| 166
| 4.590909
| 0.5
| 0.326733
| 0.277228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0.174699
| 166
| 9
| 46
| 18.444444
| 0.722628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
4bbb6949bc19ba48d18243b754f90483bb904c06
| 411,618
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_clns_isis_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_clns_isis_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_clns_isis_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'IsisSnpAuthEnum' : _MetaInfoEnum('IsisSnpAuthEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'send-only':'send_only',
'full':'full',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibMaxAreaAddressMismatchBooleanEnum' : _MetaInfoEnum('IsisMibMaxAreaAddressMismatchBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibLspTooLargeToPropagateBooleanEnum' : _MetaInfoEnum('IsisMibLspTooLargeToPropagateBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibSequenceNumberSkipBooleanEnum' : _MetaInfoEnum('IsisMibSequenceNumberSkipBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisInterfaceFrrTiebreakerEnum' : _MetaInfoEnum('IsisInterfaceFrrTiebreakerEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'node-protecting':'node_protecting',
'srlg-disjoint':'srlg_disjoint',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAuthenticationAlgorithmEnum' : _MetaInfoEnum('IsisAuthenticationAlgorithmEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'cleartext':'cleartext',
'hmac-md5':'hmac_md5',
'keychain':'keychain',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisOverloadBitModeEnum' : _MetaInfoEnum('IsisOverloadBitModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'permanently-set':'permanently_set',
'startup-period':'startup_period',
'wait-for-bgp':'wait_for_bgp',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibRejectedAdjacencyBooleanEnum' : _MetaInfoEnum('IsisMibRejectedAdjacencyBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibCorruptedLspDetectedBooleanEnum' : _MetaInfoEnum('IsisMibCorruptedLspDetectedBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAdjCheckEnum' : _MetaInfoEnum('IsisAdjCheckEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disabled':'disabled',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisispfStateEnum' : _MetaInfoEnum('IsisispfStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'enabled':'enabled',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisfrrLoadSharingEnum' : _MetaInfoEnum('IsisfrrLoadSharingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disable':'disable',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAuthenticationFailureBooleanEnum' : _MetaInfoEnum('IsisMibAuthenticationFailureBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisInterfaceStateEnum' : _MetaInfoEnum('IsisInterfaceStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'shutdown':'shutdown',
'suppressed':'suppressed',
'passive':'passive',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisTracingModeEnum' : _MetaInfoEnum('IsisTracingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'off':'off',
'basic':'basic',
'enhanced':'enhanced',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMetricStyleEnum' : _MetaInfoEnum('IsisMetricStyleEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'old-metric-style':'old_metric_style',
'new-metric-style':'new_metric_style',
'both-metric-style':'both_metric_style',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisNsfFlavorEnum' : _MetaInfoEnum('IsisNsfFlavorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'cisco-proprietary-nsf':'cisco_proprietary_nsf',
'ietf-standard-nsf':'ietf_standard_nsf',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisInterfaceAfStateEnum' : _MetaInfoEnum('IsisInterfaceAfStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disable':'disable',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisApplyWeightEnum' : _MetaInfoEnum('IsisApplyWeightEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'ecmp-only':'ecmp_only',
'ucmp-only':'ucmp_only',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisPrefixPriorityEnum' : _MetaInfoEnum('IsisPrefixPriorityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'critical-priority':'critical_priority',
'high-priority':'high_priority',
'medium-priority':'medium_priority',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAuthenticationTypeFailureBooleanEnum' : _MetaInfoEnum('IsisMibAuthenticationTypeFailureBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMicroLoopAvoidanceEnum' : _MetaInfoEnum('IsisMicroLoopAvoidanceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'not-set':'not_set',
'micro-loop-avoidance-all':'micro_loop_avoidance_all',
'micro-loop-avoidance-protected':'micro_loop_avoidance_protected',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAdvTypeExternalEnum' : _MetaInfoEnum('IsisAdvTypeExternalEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'external':'external',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisRemoteLfaEnum' : _MetaInfoEnum('IsisRemoteLfaEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'remote-lfa-none':'remote_lfa_none',
'remote-lfa-tunnel-ldp':'remote_lfa_tunnel_ldp',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAreaMismatchBooleanEnum' : _MetaInfoEnum('IsisMibAreaMismatchBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAttemptToExceedMaxSequenceBooleanEnum' : _MetaInfoEnum('IsisMibAttemptToExceedMaxSequenceBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisConfigurableLevelsEnum' : _MetaInfoEnum('IsisConfigurableLevelsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'level1':'level1',
'level2':'level2',
'level1-and2':'level1_and2',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisfrrTiebreakerEnum' : _MetaInfoEnum('IsisfrrTiebreakerEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'downstream':'downstream',
'lc-disjoint':'lc_disjoint',
'lowest-backup-metric':'lowest_backup_metric',
'node-protecting':'node_protecting',
'primary-path':'primary_path',
'secondary-path':'secondary_path',
'srlg-disjoint':'srlg_disjoint',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibManualAddressDropsBooleanEnum' : _MetaInfoEnum('IsisMibManualAddressDropsBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMetricStyleTransitionEnum' : _MetaInfoEnum('IsisMetricStyleTransitionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disabled':'disabled',
'enabled':'enabled',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisexplicitNullFlagEnum' : _MetaInfoEnum('IsisexplicitNullFlagEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disable':'disable',
'enable':'enable',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMetricEnum' : _MetaInfoEnum('IsisMetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'internal':'internal',
'external':'external',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisHelloPaddingEnum' : _MetaInfoEnum('IsisHelloPaddingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'never':'never',
'sometimes':'sometimes',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibDatabaseOverFlowBooleanEnum' : _MetaInfoEnum('IsisMibDatabaseOverFlowBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAdvTypeInterLevelEnum' : _MetaInfoEnum('IsisAdvTypeInterLevelEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'inter-level':'inter_level',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAuthenticationFailureModeEnum' : _MetaInfoEnum('IsisAuthenticationFailureModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'drop':'drop',
'send-only':'send_only',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibProtocolsSupportedMismatchBooleanEnum' : _MetaInfoEnum('IsisMibProtocolsSupportedMismatchBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisRedistProtoEnum' : _MetaInfoEnum('IsisRedistProtoEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'connected':'connected',
'static':'static',
'ospf':'ospf',
'bgp':'bgp',
'isis':'isis',
'ospfv3':'ospfv3',
'rip':'rip',
'eigrp':'eigrp',
'subscriber':'subscriber',
'application':'application',
'mobile':'mobile',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisphpFlagEnum' : _MetaInfoEnum('IsisphpFlagEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'enable':'enable',
'disable':'disable',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibIdLengthMismatchBooleanEnum' : _MetaInfoEnum('IsisMibIdLengthMismatchBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAllBooleanEnum' : _MetaInfoEnum('IsisMibAllBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibOriginatedLspBufferSizeMismatchBooleanEnum' : _MetaInfoEnum('IsisMibOriginatedLspBufferSizeMismatchBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsissidEnum' : _MetaInfoEnum('IsissidEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'index':'index',
'absolute':'absolute',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisfrrEnum' : _MetaInfoEnum('IsisfrrEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'per-link':'per_link',
'per-prefix':'per_prefix',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisAttachedBitEnum' : _MetaInfoEnum('IsisAttachedBitEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'area':'area',
'on':'on',
'off':'off',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'NflagClearEnum' : _MetaInfoEnum('NflagClearEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'disable':'disable',
'enable':'enable',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisLabelPreferenceEnum' : _MetaInfoEnum('IsisLabelPreferenceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'ldp':'ldp',
'segment-routing':'segment_routing',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibAdjacencyChangeBooleanEnum' : _MetaInfoEnum('IsisMibAdjacencyChangeBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibLspErrorDetectedBooleanEnum' : _MetaInfoEnum('IsisMibLspErrorDetectedBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibOwnLspPurgeBooleanEnum' : _MetaInfoEnum('IsisMibOwnLspPurgeBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'IsisMibVersionSkewBooleanEnum' : _MetaInfoEnum('IsisMibVersionSkewBooleanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'false':'false',
'true':'true',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Srgb' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Srgb',
False,
[
_MetaInfoClassMember('lower-bound', ATTRIBUTE, 'int' , None, None,
[('16000', '1048574')], [],
''' The lower bound of the SRGB
''',
'lower_bound',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('upper-bound', ATTRIBUTE, 'int' , None, None,
[('16001', '1048575')], [],
''' The upper bound of the SRGB
''',
'upper_bound',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'srgb',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('initial-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Initial wait before generating local LSP in
milliseconds
''',
'initial_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Maximum wait before generating local LSP in
milliseconds
''',
'maximum_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('secondary-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Secondary wait before generating local LSP
in milliseconds
''',
'secondary_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-generation-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspGenerationIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspGenerationIntervals',
False,
[
_MetaInfoClassMember('lsp-generation-interval', REFERENCE_LIST, 'LspGenerationInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval',
[], [],
''' LSP generation scheduling parameters
''',
'lsp_generation_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-generation-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('initial-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Initial delay expected to take since last
LSPin milliseconds
''',
'initial_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Maximum delay expected to take since last
LSPin milliseconds
''',
'maximum_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('secondary-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Secondary delay expected to take since last
LSPin milliseconds
''',
'secondary_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-arrival-time',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspArrivalTimes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspArrivalTimes',
False,
[
_MetaInfoClassMember('lsp-arrival-time', REFERENCE_LIST, 'LspArrivalTime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime',
[], [],
''' Minimum LSP arrival time
''',
'lsp_arrival_time',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-arrival-times',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.TraceBufferSize' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.TraceBufferSize',
False,
[
_MetaInfoClassMember('detailed', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Buffer size for detailed traces
''',
'detailed',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Buffer size for hello trace
''',
'hello',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('severe', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Buffer size for severe trace
''',
'severe',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('standard', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Buffer size for standard traces
''',
'standard',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'trace-buffer-size',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-link-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.MaxLinkMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.MaxLinkMetrics',
False,
[
_MetaInfoClassMember('max-link-metric', REFERENCE_LIST, 'MaxLinkMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric',
[], [],
''' Max Link Metric
''',
'max_link_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-link-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap',
False,
[
_MetaInfoClassMember('advertise-local', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Segment Routing prefix SID map
advertise local
''',
'advertise_local',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('receive', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, remote prefix SID map
advertisements will be used. If FALSE,
they will not be used.
''',
'receive',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'prefix-sid-map',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting',
False,
[
_MetaInfoClassMember('mpls', REFERENCE_ENUM_CLASS, 'IsisLabelPreferenceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisLabelPreferenceEnum',
[], [],
''' Prefer segment routing labels over LDP
labels
''',
'mpls',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-sid-map', REFERENCE_CLASS, 'PrefixSidMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap',
[], [],
''' Enable Segment Routing prefix SID map
configuration
''',
'prefix_sid_map',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'segment-routing',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('style', REFERENCE_ENUM_CLASS, 'IsisMetricStyleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricStyleEnum',
[], [],
''' Metric Style
''',
'style',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('transition-state', REFERENCE_ENUM_CLASS, 'IsisMetricStyleTransitionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricStyleTransitionEnum',
[], [],
''' Transition state
''',
'transition_state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric-style',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MetricStyles',
False,
[
_MetaInfoClassMember('metric-style', REFERENCE_LIST, 'MetricStyle' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle',
[], [],
''' Configuration of metric style in LSPs
''',
'metric_style',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric-styles',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('load-sharing', REFERENCE_ENUM_CLASS, 'IsisfrrLoadSharingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrLoadSharingEnum',
[], [],
''' Load sharing
''',
'load_sharing',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-load-sharing',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings',
False,
[
_MetaInfoClassMember('frr-load-sharing', REFERENCE_LIST, 'FrrLoadSharing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing',
[], [],
''' Disable load sharing
''',
'frr_load_sharing',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-load-sharings',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('priority', REFERENCE_ENUM_CLASS, 'IsisPrefixPriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisPrefixPriorityEnum',
[], [],
''' Compute for all prefixes upto the
specified priority
''',
'priority',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priority-limit',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits',
False,
[
_MetaInfoClassMember('priority-limit', REFERENCE_LIST, 'PriorityLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit',
[], [],
''' Limit backup computation upto the prefix
priority
''',
'priority_limit',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priority-limits',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the prefix list
''',
'prefix_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes',
False,
[
_MetaInfoClassMember('frr-remote-lfa-prefix', REFERENCE_LIST, 'FrrRemoteLfaPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
[], [],
''' Filter remote LFA router IDs using
prefix-list
''',
'frr_remote_lfa_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers.FrrTiebreaker' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers.FrrTiebreaker',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('tiebreaker', REFERENCE_ENUM_CLASS, 'IsisfrrTiebreakerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrTiebreakerEnum',
[], [],
''' Tiebreaker for which configuration
applies
''',
'tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Preference order among tiebreakers
''',
'index',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-tiebreaker',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers',
False,
[
_MetaInfoClassMember('frr-tiebreaker', REFERENCE_LIST, 'FrrTiebreaker' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers.FrrTiebreaker',
[], [],
''' Configure tiebreaker for multiple backups
''',
'frr_tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-tiebreakers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies.FrrUseCandOnly' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-use-cand-only',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies',
False,
[
_MetaInfoClassMember('frr-use-cand-only', REFERENCE_LIST, 'FrrUseCandOnly' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
[], [],
''' Configure use candidate only to exclude
interfaces as backup
''',
'frr_use_cand_only',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-use-cand-onlies',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.FrrTable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.FrrTable',
False,
[
_MetaInfoClassMember('frr-load-sharings', REFERENCE_CLASS, 'FrrLoadSharings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings',
[], [],
''' Load share prefixes across multiple
backups
''',
'frr_load_sharings',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-prefixes', REFERENCE_CLASS, 'FrrRemoteLfaPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes',
[], [],
''' FRR remote LFA prefix list filter
configuration
''',
'frr_remote_lfa_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-tiebreakers', REFERENCE_CLASS, 'FrrTiebreakers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers',
[], [],
''' FRR tiebreakers configuration
''',
'frr_tiebreakers',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-use-cand-onlies', REFERENCE_CLASS, 'FrrUseCandOnlies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies',
[], [],
''' FRR use candidate only configuration
''',
'frr_use_cand_onlies',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('priority-limits', REFERENCE_CLASS, 'PriorityLimits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits',
[], [],
''' FRR prefix-limit configuration
''',
'priority_limits',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-table',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.RouterId' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.RouterId',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv4/IPv6 address to be used as a router
ID. Precisely one of Address and Interface
must be specified.
''',
'address',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface with designated stable IP
address to be used as a router ID. This
must be a Loopback interface. Precisely
one of Address and Interface must be
specified.
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'router-id',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities.SpfPrefixPriority' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities.SpfPrefixPriority',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' SPF Level for prefix prioritization
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-priority-type', REFERENCE_ENUM_CLASS, 'IsisPrefixPriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisPrefixPriorityEnum',
[], [],
''' SPF Priority to assign matching prefixes
''',
'prefix_priority_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access List to determine prefixes for
this priority
''',
'access_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('admin-tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Tag value to determine prefixes for this
priority
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-prefix-priority',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities',
False,
[
_MetaInfoClassMember('spf-prefix-priority', REFERENCE_LIST, 'SpfPrefixPriority' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities.SpfPrefixPriority',
[], [],
''' Determine SPF priority for prefixes
''',
'spf_prefix_priority',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-prefix-priorities',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes.SummaryPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes.SummaryPrefix',
False,
[
_MetaInfoClassMember('address-prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True, [
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
]),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('1', '2')], [],
''' Level in which to summarize routes
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' The tag value
''',
'tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'summary-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes',
False,
[
_MetaInfoClassMember('summary-prefix', REFERENCE_LIST, 'SummaryPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes.SummaryPrefix',
[], [],
''' Configure IP address prefixes to advertise
''',
'summary_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'summary-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance',
False,
[
_MetaInfoClassMember('enable', REFERENCE_ENUM_CLASS, 'IsisMicroLoopAvoidanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMicroLoopAvoidanceEnum',
[], [],
''' MicroLoop avoidance enable configuration
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('rib-update-delay', ATTRIBUTE, 'int' , None, None,
[('1000', '65535')], [],
''' Value of delay in msecs in updating RIB
''',
'rib_update_delay',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'micro-loop-avoidance',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable',
False,
[
_MetaInfoClassMember('prefix-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the Prefix List
''',
'prefix_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('variance', ATTRIBUTE, 'int' , None, None,
[('101', '10000')], [],
''' Value of variance
''',
'variance',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'enable',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces.ExcludeInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces.ExcludeInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the interface to be excluded
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'exclude-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces',
False,
[
_MetaInfoClassMember('exclude-interface', REFERENCE_LIST, 'ExcludeInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces.ExcludeInterface',
[], [],
''' Exclude this interface from UCMP path
computation
''',
'exclude_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'exclude-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Ucmp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ucmp',
False,
[
_MetaInfoClassMember('delay-interval', ATTRIBUTE, 'int' , None, None,
[('100', '65535')], [],
''' Delay in msecs between primary SPF and
UCMP computation
''',
'delay_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('enable', REFERENCE_CLASS, 'Enable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable',
[], [],
''' UCMP feature enable configuration
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('exclude-interfaces', REFERENCE_CLASS, 'ExcludeInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces',
[], [],
''' Interfaces excluded from UCMP path
computation
''',
'exclude_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ucmp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes.MaxRedistPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes.MaxRedistPrefix',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-limit', ATTRIBUTE, 'int' , None, None,
[('1', '28000')], [],
''' Max number of prefixes
''',
'prefix_limit',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-redist-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes',
False,
[
_MetaInfoClassMember('max-redist-prefix', REFERENCE_LIST, 'MaxRedistPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes.MaxRedistPrefix',
[], [],
''' An upper limit on the number of
redistributed prefixes which may be
included in the local system's LSP
''',
'max_redist_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-redist-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Propagations.Propagation' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Propagations.Propagation',
False,
[
_MetaInfoClassMember('destination-level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Destination level for routes. Must
differ from SourceLevel
''',
'destination_level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('source-level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Source level for routes
''',
'source_level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy limiting routes to be
propagated
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'propagation',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Propagations' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Propagations',
False,
[
_MetaInfoClassMember('propagation', REFERENCE_LIST, 'Propagation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Propagations.Propagation',
[], [],
''' Propagate routes between IS-IS levels
''',
'propagation',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'propagations',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
False,
[
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'connected-or-static-or-rip-or-subscriber-or-mobile',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
False,
[
_MetaInfoClassMember('instance-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Protocol Instance Identifier. Mandatory
for ISIS, OSPF and application, must not
be specified otherwise.
''',
'instance_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ospf-or-ospfv3-or-isis-or-application',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Bgp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Bgp',
False,
[
_MetaInfoClassMember('as-xx', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' First half of BGP AS number in XX.YY
format. Mandatory if Protocol is BGP
and must not be specified otherwise.
Must be a non-zero value if second half
is zero.
''',
'as_xx',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('as-yy', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Second half of BGP AS number in XX.YY
format. Mandatory if Protocol is BGP and
must not be specified otherwise. Must be
a non-zero value if first half is zero.
''',
'as_yy',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'bgp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Eigrp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Eigrp',
False,
[
_MetaInfoClassMember('as-zz', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Eigrp as number.
''',
'as_zz',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'eigrp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution',
False,
[
_MetaInfoClassMember('protocol-name', REFERENCE_ENUM_CLASS, 'IsisRedistProtoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisRedistProtoEnum',
[], [],
''' The protocol to be redistributed. OSPFv3
may not be specified for an IPv4 topology
and OSPF may not be specified for an IPv6
topology.
''',
'protocol_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('bgp', REFERENCE_LIST, 'Bgp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Bgp',
[], [],
''' bgp
''',
'bgp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('connected-or-static-or-rip-or-subscriber-or-mobile', REFERENCE_CLASS, 'ConnectedOrStaticOrRipOrSubscriberOrMobile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
[], [],
''' connected or static or rip or subscriber
or mobile
''',
'connected_or_static_or_rip_or_subscriber_or_mobile',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('eigrp', REFERENCE_LIST, 'Eigrp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Eigrp',
[], [],
''' eigrp
''',
'eigrp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-or-ospfv3-or-isis-or-application', REFERENCE_LIST, 'OspfOrOspfv3OrIsisOrApplication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
[], [],
''' ospf or ospfv3 or isis or application
''',
'ospf_or_ospfv3_or_isis_or_application',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'redistribution',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.Redistributions' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Redistributions',
False,
[
_MetaInfoClassMember('redistribution', REFERENCE_LIST, 'Redistribution' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution',
[], [],
''' Redistribution of other protocols into
this IS-IS instance
''',
'redistribution',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'redistributions',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals.SpfPeriodicInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals.SpfPeriodicInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('periodic-interval', ATTRIBUTE, 'int' , None, None,
[('0', '3600')], [],
''' Maximum interval in between SPF runs in
seconds
''',
'periodic_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-periodic-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals',
False,
[
_MetaInfoClassMember('spf-periodic-interval', REFERENCE_LIST, 'SpfPeriodicInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals.SpfPeriodicInterval',
[], [],
''' Maximum interval between spf runs
''',
'spf_periodic_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-periodic-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals.SpfInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals.SpfInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('initial-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Initial wait before running a route
calculation in milliseconds
''',
'initial_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Maximum wait before running a route
calculation in milliseconds
''',
'maximum_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('secondary-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Secondary wait before running a route
calculation in milliseconds
''',
'secondary_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals',
False,
[
_MetaInfoClassMember('spf-interval', REFERENCE_LIST, 'SpfInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals.SpfInterval',
[], [],
''' Route calculation scheduling parameters
''',
'spf_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable convergence monitoring
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-list', ATTRIBUTE, 'str' , None, None,
[], [],
''' Enable the monitoring of individual
prefixes (prefix list name)
''',
'prefix_list',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('track-ip-frr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable the Tracking of IP-Frr Convergence
''',
'track_ip_frr',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'monitor-convergence',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation',
False,
[
_MetaInfoClassMember('external', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Flag to indicate that the default prefix
should be originated as an external route
''',
'external',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy name
''',
'policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('use-policy', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag to indicate whether default
origination is controlled using a policy
''',
'use_policy',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'default-information',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances.AdminDistance' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.AdminDistances.AdminDistance',
False,
[
_MetaInfoClassMember('address-prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True, [
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
]),
_MetaInfoClassMember('distance', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Administrative distance
''',
'distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-list', ATTRIBUTE, 'str' , None, None,
[], [],
''' List of prefixes to which this distance
applies
''',
'prefix_list',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-distance',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.AdminDistances',
False,
[
_MetaInfoClassMember('admin-distance', REFERENCE_LIST, 'AdminDistance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances.AdminDistance',
[], [],
''' Administrative distance configuration. The
supplied distance is applied to all routes
discovered from the specified source, or
only those that match the supplied prefix
list if this is specified
''',
'admin_distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-distances',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: ISPF (incremental SPF) configuration subtree —
# per-level 'state' list entries (keyed by level) under 'states' under 'ispf'.
'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States.State' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ispf.States.State',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IsisispfStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisispfStateEnum',
[], [],
''' State
''',
'state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'state',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'states': wraps the per-level ISPF state list.
'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ispf.States',
False,
[
_MetaInfoClassMember('state', REFERENCE_LIST, 'State' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States.State',
[], [],
''' Enable/disable ISPF
''',
'state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'states',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'ispf': top of the ISPF configuration subtree.
'Isis.Instances.Instance.Afs.Af.AfData.Ispf' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Ispf',
False,
[
_MetaInfoClassMember('states', REFERENCE_CLASS, 'States' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States',
[], [],
''' ISPF state (enable/disable)
''',
'states',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ispf',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: container 'mpls-ldp-global' — single boolean leaf enabling
# LDP auto-config on IS-IS interfaces for this address family.
'Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal',
False,
[
_MetaInfoClassMember('auto-config', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, LDP will be enabled onall IS-IS
interfaces enabled for this address-family
''',
'auto_config',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls-ldp-global',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: MPLS-TE router-id — mutually exclusive IPv4 address
# (pattern-restricted) or Loopback interface name (pattern-restricted).
'Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address to be used as a router ID.
Precisely one of Address and Interface
must be specified.
''',
'address',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface with designated stable IP
address to be used as a router ID. This
must be a Loopback interface. Precisely
one of Address and Interface must be
specified.
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'router-id',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'mpls': per-AF MPLS-TE settings (igp-intact, level, multicast-intact,
# and the router-id container above).
'Isis.Instances.Instance.Afs.Af.AfData.Mpls' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Mpls',
False,
[
_MetaInfoClassMember('igp-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install TE and non-TE nexthops in the RIB
''',
'igp_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Enable MPLS for an IS-IS at the given
levels
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('multicast-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install non-TE nexthops in the RIB for use
by multicast
''',
'multicast_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId',
[], [],
''' Traffic Engineering stable IP address for
system
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: per-level metric configuration.
# MetricEnum has a single literal 'maximum' (the 16777215 "exclude from SPF" metric).
'Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'maximum':'maximum',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
# List entry 'metric' (keyed by level): union of the MetricEnum and an int 1..16777215.
'Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
# NOTE(review): the union's enum member references the
# Interfaces...TopologyName.Metrics.Metric.MetricEnum path rather than the
# AfData.Metrics.Metric.MetricEnum declared just above — this is how ydk-gen
# emitted it (the enums are identical in content); confirm against
# regenerated bindings before changing.
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'metrics': wraps the per-level metric list.
'Isis.Instances.Instance.Afs.Af.AfData.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: per-level UCMP load-balancing weight.
# NOTE(review): the range restriction is ('1', '16777214') while the description
# text says <1-16777215>; the mismatch comes from the source YANG model — confirm
# the intended upper bound there before relying on either value.
'Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Weight to be configured under interface for
Load Balancing. Allowed weight: <1-16777215>
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weight',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'weights': wraps the per-level weight list.
'Isis.Instances.Instance.Afs.Af.AfData.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: container 'af-data' — the aggregate per-address-family
# IS-IS configuration node. Members are plain leaves (ATTRIBUTE), enum leaves
# (REFERENCE_ENUM_CLASS), and child containers (REFERENCE_CLASS) whose own
# descriptors appear elsewhere in this table. None of the members is a list key.
'Isis.Instances.Instance.Afs.Af.AfData' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.AfData',
False,
[
_MetaInfoClassMember('adjacency-check', REFERENCE_ENUM_CLASS, 'IsisAdjCheckEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAdjCheckEnum',
[], [],
''' Suppress check for consistent AF support on
received IIHs
''',
'adjacency_check',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('admin-distances', REFERENCE_CLASS, 'AdminDistances' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances',
[], [],
''' Per-route administrative
distanceconfiguration
''',
'admin_distances',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-link-attributes', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, advertise additional link
attributes in our LSP
''',
'advertise_link_attributes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-passive-only', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' If enabled, advertise prefixes of passive
interfaces only
''',
'advertise_passive_only',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('apply-weight', REFERENCE_ENUM_CLASS, 'IsisApplyWeightEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisApplyWeightEnum',
[], [],
''' Apply weights to UCMP or ECMP only
''',
'apply_weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('attached-bit', REFERENCE_ENUM_CLASS, 'IsisAttachedBitEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAttachedBitEnum',
[], [],
''' Set the attached bit in this router's level
1 System LSP
''',
'attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-admin-distance', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Default IS-IS administrative distance
configuration.
''',
'default_admin_distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-information', REFERENCE_CLASS, 'DefaultInformation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation',
[], [],
''' Control origination of a default route with
the option of using a policy. If no policy
is specified the default route is
advertised with zero cost in level 2 only.
''',
'default_information',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-table', REFERENCE_CLASS, 'FrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.FrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ignore-attached-bit', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, Ignore other routers attached bit
''',
'ignore_attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ispf', REFERENCE_CLASS, 'Ispf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ispf',
[], [],
''' ISPF configuration
''',
'ispf',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('max-redist-prefixes', REFERENCE_CLASS, 'MaxRedistPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes',
[], [],
''' Maximum number of redistributed
prefixesconfiguration
''',
'max_redist_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-paths', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Maximum number of active parallel paths per
route
''',
'maximum_paths',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-styles', REFERENCE_CLASS, 'MetricStyles' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles',
[], [],
''' Metric-style configuration
''',
'metric_styles',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('micro-loop-avoidance', REFERENCE_CLASS, 'MicroLoopAvoidance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance',
[], [],
''' Micro Loop Avoidance configuration
''',
'micro_loop_avoidance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('monitor-convergence', REFERENCE_CLASS, 'MonitorConvergence' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence',
[], [],
''' Enable convergence monitoring
''',
'monitor_convergence',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls', REFERENCE_CLASS, 'Mpls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Mpls',
[], [],
''' MPLS configuration. MPLS configuration will
only be applied for the IPv4-unicast
address-family.
''',
'mpls',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp-global', REFERENCE_CLASS, 'MplsLdpGlobal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal',
[], [],
''' MPLS LDP configuration. MPLS LDP
configuration will only be applied for the
IPv4-unicast address-family.
''',
'mpls_ldp_global',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('propagations', REFERENCE_CLASS, 'Propagations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Propagations',
[], [],
''' Route propagation configuration
''',
'propagations',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('redistributions', REFERENCE_CLASS, 'Redistributions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Redistributions',
[], [],
''' Protocol redistribution configuration
''',
'redistributions',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-source-first-hop', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, routes will be installed with the
IP address of the first-hop node as the
source instead of the originating node
''',
'route_source_first_hop',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.RouterId',
[], [],
''' Stable IP address for system. Will only be
applied for the unicast sub-address-family.
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('segment-routing', REFERENCE_CLASS, 'SegmentRouting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting',
[], [],
''' Enable Segment Routing configuration
''',
'segment_routing',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('single-topology', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Run IPv6 Unicast using the standard (IPv4
Unicast) topology
''',
'single_topology',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-intervals', REFERENCE_CLASS, 'SpfIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals',
[], [],
''' SPF-interval configuration
''',
'spf_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-periodic-intervals', REFERENCE_CLASS, 'SpfPeriodicIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals',
[], [],
''' Peoridic SPF configuration
''',
'spf_periodic_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-prefix-priorities', REFERENCE_CLASS, 'SpfPrefixPriorities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities',
[], [],
''' SPF Prefix Priority configuration
''',
'spf_prefix_priorities',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('summary-prefixes', REFERENCE_CLASS, 'SummaryPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes',
[], [],
''' Summary-prefix configuration
''',
'summary_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-id', ATTRIBUTE, 'int' , None, None,
[('6', '4095')], [],
''' Set the topology ID for a named
(non-default) topology. This object must be
set before any other configuration is
supplied for a named (non-default) topology
, and must be the last configuration object
to be removed. This item should not be
supplied for the non-named default
topologies.
''',
'topology_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ucmp', REFERENCE_CLASS, 'Ucmp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Ucmp',
[], [],
''' UCMP (UnEqual Cost MultiPath) configuration
''',
'ucmp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData.Weights',
[], [],
''' Weight configuration
''',
'weights',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'af-data',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: Segment Routing under a named topology —
# 'prefix-sid-map' (advertise-local / receive) and its parent 'segment-routing'.
'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap',
False,
[
_MetaInfoClassMember('advertise-local', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Segment Routing prefix SID map
advertise local
''',
'advertise_local',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('receive', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, remote prefix SID map
advertisements will be used. If FALSE,
they will not be used.
''',
'receive',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'prefix-sid-map',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'segment-routing': MPLS label preference plus the prefix-sid-map child.
'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting',
False,
[
_MetaInfoClassMember('mpls', REFERENCE_ENUM_CLASS, 'IsisLabelPreferenceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisLabelPreferenceEnum',
[], [],
''' Prefer segment routing labels over LDP
labels
''',
'mpls',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-sid-map', REFERENCE_CLASS, 'PrefixSidMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap',
[], [],
''' Enable Segment Routing prefix SID map
configuration
''',
'prefix_sid_map',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'segment-routing',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: per-level LSP metric-style (narrow/wide + transition),
# list entry keyed by level, wrapped by container 'metric-styles'.
'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('style', REFERENCE_ENUM_CLASS, 'IsisMetricStyleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricStyleEnum',
[], [],
''' Metric Style
''',
'style',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('transition-state', REFERENCE_ENUM_CLASS, 'IsisMetricStyleTransitionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricStyleTransitionEnum',
[], [],
''' Transition state
''',
'transition_state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric-style',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'metric-styles': wraps the per-level metric-style list.
'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles',
False,
[
_MetaInfoClassMember('metric-style', REFERENCE_LIST, 'MetricStyle' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle',
[], [],
''' Configuration of metric style in LSPs
''',
'metric_style',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric-styles',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: FRR load-sharing — per-level list entry (keyed by level)
# and its wrapping container 'frr-load-sharings'.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('load-sharing', REFERENCE_ENUM_CLASS, 'IsisfrrLoadSharingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrLoadSharingEnum',
[], [],
''' Load sharing
''',
'load_sharing',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-load-sharing',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings',
False,
[
_MetaInfoClassMember('frr-load-sharing', REFERENCE_LIST, 'FrrLoadSharing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing',
[], [],
''' Disable load sharing
''',
'frr_load_sharing',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-load-sharings',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# FRR priority-limit — list entry keyed by (frr-type, level) with a priority leaf,
# and its wrapping container 'priority-limits'.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('priority', REFERENCE_ENUM_CLASS, 'IsisPrefixPriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisPrefixPriorityEnum',
[], [],
''' Compute for all prefixes upto the
specified priority
''',
'priority',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priority-limit',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits',
False,
[
_MetaInfoClassMember('priority-limit', REFERENCE_LIST, 'PriorityLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit',
[], [],
''' Limit backup computation upto the prefix
priority
''',
'priority_limit',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priority-limits',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: remote-LFA prefix-list filter — per-level list entry
# (keyed by level) and its wrapping container 'frr-remote-lfa-prefixes'.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the prefix list
''',
'prefix_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes',
False,
[
_MetaInfoClassMember('frr-remote-lfa-prefix', REFERENCE_LIST, 'FrrRemoteLfaPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
[], [],
''' Filter remote LFA router IDs using
prefix-list
''',
'frr_remote_lfa_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# FRR tiebreaker ordering — list entry keyed by (level, tiebreaker) with an
# index leaf (1..255), and its wrapping container 'frr-tiebreakers'.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers.FrrTiebreaker' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers.FrrTiebreaker',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('tiebreaker', REFERENCE_ENUM_CLASS, 'IsisfrrTiebreakerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrTiebreakerEnum',
[], [],
''' Tiebreaker for which configuration
applies
''',
'tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Preference order among tiebreakers
''',
'index',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-tiebreaker',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers',
False,
[
_MetaInfoClassMember('frr-tiebreaker', REFERENCE_LIST, 'FrrTiebreaker' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers.FrrTiebreaker',
[], [],
''' Configure tiebreaker for multiple backups
''',
'frr_tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-tiebreakers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Generated YDK metadata: FRR use-candidate-only — list entry keyed by
# (frr-type, level) with no non-key leaves, wrapped by 'frr-use-cand-onlies'.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies.FrrUseCandOnly' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-use-cand-only',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies',
False,
[
_MetaInfoClassMember('frr-use-cand-only', REFERENCE_LIST, 'FrrUseCandOnly' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
[], [],
''' Configure use candidate only to exclude
interfaces as backup
''',
'frr_use_cand_only',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-use-cand-onlies',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Container 'frr-table': aggregates the five Fast-ReRoute child containers above.
'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable',
False,
[
_MetaInfoClassMember('frr-load-sharings', REFERENCE_CLASS, 'FrrLoadSharings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings',
[], [],
''' Load share prefixes across multiple
backups
''',
'frr_load_sharings',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-prefixes', REFERENCE_CLASS, 'FrrRemoteLfaPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes',
[], [],
''' FRR remote LFA prefix list filter
configuration
''',
'frr_remote_lfa_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-tiebreakers', REFERENCE_CLASS, 'FrrTiebreakers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers',
[], [],
''' FRR tiebreakers configuration
''',
'frr_tiebreakers',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-use-cand-onlies', REFERENCE_CLASS, 'FrrUseCandOnlies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies',
[], [],
''' FRR use candidate only configuration
''',
'frr_use_cand_onlies',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('priority-limits', REFERENCE_CLASS, 'PriorityLimits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits',
[], [],
''' FRR prefix-limit configuration
''',
'priority_limits',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-table',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.RouterId' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.RouterId',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv4/IPv6 address to be used as a router
ID. Precisely one of Address and Interface
must be specified.
''',
'address',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface with designated stable IP
address to be used as a router ID. This
must be a Loopback interface. Precisely
one of Address and Interface must be
specified.
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'router-id',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities.SpfPrefixPriority' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities.SpfPrefixPriority',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' SPF Level for prefix prioritization
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-priority-type', REFERENCE_ENUM_CLASS, 'IsisPrefixPriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisPrefixPriorityEnum',
[], [],
''' SPF Priority to assign matching prefixes
''',
'prefix_priority_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access List to determine prefixes for
this priority
''',
'access_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('admin-tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Tag value to determine prefixes for this
priority
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-prefix-priority',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities',
False,
[
_MetaInfoClassMember('spf-prefix-priority', REFERENCE_LIST, 'SpfPrefixPriority' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities.SpfPrefixPriority',
[], [],
''' Determine SPF priority for prefixes
''',
'spf_prefix_priority',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-prefix-priorities',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes.SummaryPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes.SummaryPrefix',
False,
[
_MetaInfoClassMember('address-prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True, [
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'],
''' IP summary address prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
]),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('1', '2')], [],
''' Level in which to summarize routes
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' The tag value
''',
'tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'summary-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes',
False,
[
_MetaInfoClassMember('summary-prefix', REFERENCE_LIST, 'SummaryPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes.SummaryPrefix',
[], [],
''' Configure IP address prefixes to advertise
''',
'summary_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'summary-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance',
False,
[
_MetaInfoClassMember('enable', REFERENCE_ENUM_CLASS, 'IsisMicroLoopAvoidanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMicroLoopAvoidanceEnum',
[], [],
''' MicroLoop avoidance enable configuration
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('rib-update-delay', ATTRIBUTE, 'int' , None, None,
[('1000', '65535')], [],
''' Value of delay in msecs in updating RIB
''',
'rib_update_delay',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'micro-loop-avoidance',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable',
False,
[
_MetaInfoClassMember('prefix-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the Prefix List
''',
'prefix_list_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('variance', ATTRIBUTE, 'int' , None, None,
[('101', '10000')], [],
''' Value of variance
''',
'variance',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'enable',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces.ExcludeInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces.ExcludeInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the interface to be excluded
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'exclude-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces',
False,
[
_MetaInfoClassMember('exclude-interface', REFERENCE_LIST, 'ExcludeInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces.ExcludeInterface',
[], [],
''' Exclude this interface from UCMP path
computation
''',
'exclude_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'exclude-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp',
False,
[
_MetaInfoClassMember('delay-interval', ATTRIBUTE, 'int' , None, None,
[('100', '65535')], [],
''' Delay in msecs between primary SPF and
UCMP computation
''',
'delay_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('enable', REFERENCE_CLASS, 'Enable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable',
[], [],
''' UCMP feature enable configuration
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('exclude-interfaces', REFERENCE_CLASS, 'ExcludeInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces',
[], [],
''' Interfaces excluded from UCMP path
computation
''',
'exclude_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ucmp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes.MaxRedistPrefix' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes.MaxRedistPrefix',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('prefix-limit', ATTRIBUTE, 'int' , None, None,
[('1', '28000')], [],
''' Max number of prefixes
''',
'prefix_limit',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-redist-prefix',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes',
False,
[
_MetaInfoClassMember('max-redist-prefix', REFERENCE_LIST, 'MaxRedistPrefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes.MaxRedistPrefix',
[], [],
''' An upper limit on the number of
redistributed prefixes which may be
included in the local system's LSP
''',
'max_redist_prefix',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'max-redist-prefixes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations.Propagation' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Propagations.Propagation',
False,
[
_MetaInfoClassMember('destination-level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Destination level for routes. Must
differ from SourceLevel
''',
'destination_level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('source-level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Source level for routes
''',
'source_level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy limiting routes to be
propagated
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'propagation',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Propagations',
False,
[
_MetaInfoClassMember('propagation', REFERENCE_LIST, 'Propagation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations.Propagation',
[], [],
''' Propagate routes between IS-IS levels
''',
'propagation',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'propagations',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
False,
[
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'connected-or-static-or-rip-or-subscriber-or-mobile',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
False,
[
_MetaInfoClassMember('instance-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Protocol Instance Identifier. Mandatory
for ISIS, OSPF and application, must not
be specified otherwise.
''',
'instance_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ospf-or-ospfv3-or-isis-or-application',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp',
False,
[
_MetaInfoClassMember('as-xx', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' First half of BGP AS number in XX.YY
format. Mandatory if Protocol is BGP
and must not be specified otherwise.
Must be a non-zero value if second half
is zero.
''',
'as_xx',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('as-yy', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Second half of BGP AS number in XX.YY
format. Mandatory if Protocol is BGP and
must not be specified otherwise. Must be
a non-zero value if first half is zero.
''',
'as_yy',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'bgp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp',
False,
[
_MetaInfoClassMember('as-zz', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Eigrp as number.
''',
'as_zz',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('levels', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Levels to redistribute routes into
''',
'levels',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63>
for narrow, <0-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-type', REFERENCE_ENUM_CLASS, 'IsisMetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisMetricEnum',
[], [],
''' IS-IS metric type
''',
'metric_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-route-type', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' OSPF route types to redistribute. May
only be specified if Protocol is OSPF.
''',
'ospf_route_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Route policy to control redistribution.
''',
'route_policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'eigrp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution',
False,
[
_MetaInfoClassMember('protocol-name', REFERENCE_ENUM_CLASS, 'IsisRedistProtoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisRedistProtoEnum',
[], [],
''' The protocol to be redistributed. OSPFv3
may not be specified for an IPv4 topology
and OSPF may not be specified for an IPv6
topology.
''',
'protocol_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('bgp', REFERENCE_LIST, 'Bgp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp',
[], [],
''' bgp
''',
'bgp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('connected-or-static-or-rip-or-subscriber-or-mobile', REFERENCE_CLASS, 'ConnectedOrStaticOrRipOrSubscriberOrMobile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
[], [],
''' connected or static or rip or subscriber
or mobile
''',
'connected_or_static_or_rip_or_subscriber_or_mobile',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('eigrp', REFERENCE_LIST, 'Eigrp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp',
[], [],
''' eigrp
''',
'eigrp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ospf-or-ospfv3-or-isis-or-application', REFERENCE_LIST, 'OspfOrOspfv3OrIsisOrApplication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
[], [],
''' ospf or ospfv3 or isis or application
''',
'ospf_or_ospfv3_or_isis_or_application',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'redistribution',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions',
False,
[
_MetaInfoClassMember('redistribution', REFERENCE_LIST, 'Redistribution' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution',
[], [],
''' Redistribution of other protocols into
this IS-IS instance
''',
'redistribution',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'redistributions',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals.SpfPeriodicInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals.SpfPeriodicInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('periodic-interval', ATTRIBUTE, 'int' , None, None,
[('0', '3600')], [],
''' Maximum interval in between SPF runs in
seconds
''',
'periodic_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-periodic-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals',
False,
[
_MetaInfoClassMember('spf-periodic-interval', REFERENCE_LIST, 'SpfPeriodicInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals.SpfPeriodicInterval',
[], [],
''' Maximum interval between spf runs
''',
'spf_periodic_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-periodic-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals.SpfInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals.SpfInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('initial-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Initial wait before running a route
calculation in milliseconds
''',
'initial_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Maximum wait before running a route
calculation in milliseconds
''',
'maximum_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('secondary-wait', ATTRIBUTE, 'int' , None, None,
[('0', '120000')], [],
''' Secondary wait before running a route
calculation in milliseconds
''',
'secondary_wait',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals',
False,
[
_MetaInfoClassMember('spf-interval', REFERENCE_LIST, 'SpfInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals.SpfInterval',
[], [],
''' Route calculation scheduling parameters
''',
'spf_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'spf-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable convergence monitoring
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-list', ATTRIBUTE, 'str' , None, None,
[], [],
''' Enable the monitoring of individual
prefixes (prefix list name)
''',
'prefix_list',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('track-ip-frr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable the Tracking of IP-Frr Convergence
''',
'track_ip_frr',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'monitor-convergence',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation',
False,
[
_MetaInfoClassMember('external', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Flag to indicate that the default prefix
should be originated as an external route
''',
'external',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy name
''',
'policy_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('use-policy', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag to indicate whether default
origination is controlled using a policy
''',
'use_policy',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'default-information',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances.AdminDistance' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances.AdminDistance',
False,
[
_MetaInfoClassMember('address-prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True, [
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('address-prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'],
''' IP route source prefix
''',
'address_prefix',
'Cisco-IOS-XR-clns-isis-cfg', True),
]),
_MetaInfoClassMember('distance', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Administrative distance
''',
'distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-list', ATTRIBUTE, 'str' , None, None,
[], [],
''' List of prefixes to which this distance
applies
''',
'prefix_list',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-distance',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances',
False,
[
_MetaInfoClassMember('admin-distance', REFERENCE_LIST, 'AdminDistance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances.AdminDistance',
[], [],
''' Administrative distance configuration. The
supplied distance is applied to all routes
discovered from the specified source, or
only those that match the supplied prefix
list if this is specified
''',
'admin_distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-distances',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States.State' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States.State',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IsisispfStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisispfStateEnum',
[], [],
''' State
''',
'state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'state',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States',
False,
[
_MetaInfoClassMember('state', REFERENCE_LIST, 'State' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States.State',
[], [],
''' Enable/disable ISPF
''',
'state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'states',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Ispf',
False,
[
_MetaInfoClassMember('states', REFERENCE_CLASS, 'States' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States',
[], [],
''' ISPF state (enable/disable)
''',
'states',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'ispf',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
    # Container: per-topology MPLS LDP auto-configuration flag.
    'Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal' : {
        'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal',
            False,
            [
            # NOTE(review): "onall" in the description string below is a typo
            # carried over from the YANG model's description. The string is
            # runtime data (served as help text), so it is left byte-identical
            # here rather than "fixed" out of sync with the model source.
            _MetaInfoClassMember('auto-config', ATTRIBUTE, 'bool' , None, None,
                [], [],
                '''                If TRUE, LDP will be enabled onall IS-IS
                interfaces enabled for this address-family
                ''',
                'auto_config',
                'Cisco-IOS-XR-clns-isis-cfg', False),
            ],
            'Cisco-IOS-XR-clns-isis-cfg',
            'mpls-ldp-global',
            _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
            'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
        ),
    },
'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address to be used as a router ID.
Precisely one of Address and Interface
must be specified.
''',
'address',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface with designated stable IP
address to be used as a router ID. This
must be a Loopback interface. Precisely
one of Address and Interface must be
specified.
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'router-id',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Mpls',
False,
[
_MetaInfoClassMember('igp-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install TE and non-TE nexthops in the RIB
''',
'igp_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Enable MPLS for an IS-IS at the given
levels
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('multicast-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install non-TE nexthops in the RIB for use
by multicast
''',
'multicast_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId',
[], [],
''' Traffic Engineering stable IP address for
system
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
    # Enum backing the 'metric' union leaf of the sibling Metrics.Metric entry.
    # Its single literal 'maximum' corresponds to the maximum wide metric
    # (16777215), which — per the Metrics container description below — causes
    # routers to exclude the link from SPF.
    'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
        {
            'maximum':'maximum',
        }, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
    # List entry keyed by 'level': per-level UCMP/ECMP load-balancing weight.
    'Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight' : {
        'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight',
            False,
            [
            _MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
                [], [],
                '''                Level to which configuration applies
                ''',
                'level',
                'Cisco-IOS-XR-clns-isis-cfg', True),
            # NOTE(review): the enforced range below is 1..16777214 while the
            # description string advertises <1-16777215>. Both values are
            # emitted from the YANG source, so the discrepancy is preserved
            # as-is — confirm against the Cisco-IOS-XR-clns-isis-cfg model
            # before relying on either bound. The range tuple, not the
            # description text, is what the runtime validates against.
            _MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
                [('1', '16777214')], [],
                '''                Weight to be configured under interface for
                Load Balancing. Allowed weight: <1-16777215>
                ''',
                'weight',
                'Cisco-IOS-XR-clns-isis-cfg', False),
            ],
            'Cisco-IOS-XR-clns-isis-cfg',
            'weight',
            _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
            'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
        ),
    },
'Isis.Instances.Instance.Afs.Af.TopologyName.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName',
False,
[
_MetaInfoClassMember('topology-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Topology Name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('adjacency-check', REFERENCE_ENUM_CLASS, 'IsisAdjCheckEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAdjCheckEnum',
[], [],
''' Suppress check for consistent AF support on
received IIHs
''',
'adjacency_check',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('admin-distances', REFERENCE_CLASS, 'AdminDistances' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances',
[], [],
''' Per-route administrative
distanceconfiguration
''',
'admin_distances',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-link-attributes', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, advertise additional link
attributes in our LSP
''',
'advertise_link_attributes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-passive-only', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' If enabled, advertise prefixes of passive
interfaces only
''',
'advertise_passive_only',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('apply-weight', REFERENCE_ENUM_CLASS, 'IsisApplyWeightEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisApplyWeightEnum',
[], [],
''' Apply weights to UCMP or ECMP only
''',
'apply_weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('attached-bit', REFERENCE_ENUM_CLASS, 'IsisAttachedBitEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAttachedBitEnum',
[], [],
''' Set the attached bit in this router's level
1 System LSP
''',
'attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-admin-distance', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Default IS-IS administrative distance
configuration.
''',
'default_admin_distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-information', REFERENCE_CLASS, 'DefaultInformation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation',
[], [],
''' Control origination of a default route with
the option of using a policy. If no policy
is specified the default route is
advertised with zero cost in level 2 only.
''',
'default_information',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-table', REFERENCE_CLASS, 'FrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ignore-attached-bit', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, Ignore other routers attached bit
''',
'ignore_attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ispf', REFERENCE_CLASS, 'Ispf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf',
[], [],
''' ISPF configuration
''',
'ispf',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('max-redist-prefixes', REFERENCE_CLASS, 'MaxRedistPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes',
[], [],
''' Maximum number of redistributed
prefixesconfiguration
''',
'max_redist_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-paths', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Maximum number of active parallel paths per
route
''',
'maximum_paths',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-styles', REFERENCE_CLASS, 'MetricStyles' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles',
[], [],
''' Metric-style configuration
''',
'metric_styles',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('micro-loop-avoidance', REFERENCE_CLASS, 'MicroLoopAvoidance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance',
[], [],
''' Micro Loop Avoidance configuration
''',
'micro_loop_avoidance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('monitor-convergence', REFERENCE_CLASS, 'MonitorConvergence' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence',
[], [],
''' Enable convergence monitoring
''',
'monitor_convergence',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls', REFERENCE_CLASS, 'Mpls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls',
[], [],
''' MPLS configuration. MPLS configuration will
only be applied for the IPv4-unicast
address-family.
''',
'mpls',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp-global', REFERENCE_CLASS, 'MplsLdpGlobal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal',
[], [],
''' MPLS LDP configuration. MPLS LDP
configuration will only be applied for the
IPv4-unicast address-family.
''',
'mpls_ldp_global',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('propagations', REFERENCE_CLASS, 'Propagations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations',
[], [],
''' Route propagation configuration
''',
'propagations',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('redistributions', REFERENCE_CLASS, 'Redistributions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions',
[], [],
''' Protocol redistribution configuration
''',
'redistributions',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-source-first-hop', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, routes will be installed with the
IP address of the first-hop node as the
source instead of the originating node
''',
'route_source_first_hop',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.RouterId',
[], [],
''' Stable IP address for system. Will only be
applied for the unicast sub-address-family.
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('segment-routing', REFERENCE_CLASS, 'SegmentRouting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting',
[], [],
''' Enable Segment Routing configuration
''',
'segment_routing',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('single-topology', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Run IPv6 Unicast using the standard (IPv4
Unicast) topology
''',
'single_topology',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-intervals', REFERENCE_CLASS, 'SpfIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals',
[], [],
''' SPF-interval configuration
''',
'spf_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-periodic-intervals', REFERENCE_CLASS, 'SpfPeriodicIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals',
[], [],
''' Peoridic SPF configuration
''',
'spf_periodic_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-prefix-priorities', REFERENCE_CLASS, 'SpfPrefixPriorities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities',
[], [],
''' SPF Prefix Priority configuration
''',
'spf_prefix_priorities',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('summary-prefixes', REFERENCE_CLASS, 'SummaryPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes',
[], [],
''' Summary-prefix configuration
''',
'summary_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-id', ATTRIBUTE, 'int' , None, None,
[('6', '4095')], [],
''' Set the topology ID for a named
(non-default) topology. This object must be
set before any other configuration is
supplied for a named (non-default) topology
, and must be the last configuration object
to be removed. This item should not be
supplied for the non-named default
topologies.
''',
'topology_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ucmp', REFERENCE_CLASS, 'Ucmp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp',
[], [],
''' UCMP (UnEqual Cost MultiPath) configuration
''',
'ucmp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Weights',
[], [],
''' Weight configuration
''',
'weights',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'topology-name',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'IsisAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisAddressFamilyEnum',
[], [],
''' Address family
''',
'af_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'IsisSubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisSubAddressFamilyEnum',
[], [],
''' Sub address family
''',
'saf_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('af-data', REFERENCE_CLASS, 'AfData' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.AfData',
[], [],
''' Data container.
''',
'af_data',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-name', REFERENCE_LIST, 'TopologyName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName',
[], [],
''' keys: topology-name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'af',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs',
False,
[
_MetaInfoClassMember('af', REFERENCE_LIST, 'Af' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af',
[], [],
''' Configuration for an IS-IS address-family. If
a named (non-default) topology is being
created it must be multicast.
''',
'af',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'afs',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspRefreshIntervals.LspRefreshInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspRefreshIntervals.LspRefreshInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Seconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-refresh-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspRefreshIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspRefreshIntervals',
False,
[
_MetaInfoClassMember('lsp-refresh-interval', REFERENCE_LIST, 'LspRefreshInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspRefreshIntervals.LspRefreshInterval',
[], [],
''' Interval between re-flooding of unchanged
LSPs
''',
'lsp_refresh_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-refresh-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Distribute' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Distribute',
False,
[
_MetaInfoClassMember('dist-inst-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Instance ID
''',
'dist_inst_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('dist-throttle', ATTRIBUTE, 'int' , None, None,
[('5', '20')], [],
''' Seconds
''',
'dist_throttle',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Level
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'distribute',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspAcceptPasswords.LspAcceptPassword' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspAcceptPasswords.LspAcceptPassword',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('password', ATTRIBUTE, 'str' , None, None,
[], ['(!.+)|([^!].+)'],
''' Password
''',
'password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-accept-password',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspAcceptPasswords' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspAcceptPasswords',
False,
[
_MetaInfoClassMember('lsp-accept-password', REFERENCE_LIST, 'LspAcceptPassword' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspAcceptPasswords.LspAcceptPassword',
[], [],
''' LSP/SNP accept passwords. This requires the
existence of an LSPPassword of the same level
.
''',
'lsp_accept_password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-accept-passwords',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspMtus.LspMtu' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspMtus.LspMtu',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[('128', '4352')], [],
''' Bytes
''',
'mtu',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-mtu',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspMtus' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspMtus',
False,
[
_MetaInfoClassMember('lsp-mtu', REFERENCE_LIST, 'LspMtu' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspMtus.LspMtu',
[], [],
''' LSP MTU
''',
'lsp_mtu',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-mtus',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Nsf' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Nsf',
False,
[
_MetaInfoClassMember('flavor', REFERENCE_ENUM_CLASS, 'IsisNsfFlavorEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisNsfFlavorEnum',
[], [],
''' NSF not configured if item is deleted
''',
'flavor',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-timer', ATTRIBUTE, 'int' , None, None,
[('1', '20')], [],
''' Per-interface time period to wait for a
restart ACK during an IETF-NSF restart. This
configuration has no effect if IETF-NSF is
not configured
''',
'interface_timer',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lifetime', ATTRIBUTE, 'int' , None, None,
[('5', '300')], [],
''' Maximum route lifetime following restart.
When this lifetime expires, old routes will
be purged from the RIB.
''',
'lifetime',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('max-interface-timer-expiry', ATTRIBUTE, 'int' , None, None,
[('1', '10')], [],
''' Maximum number of times an interface timer
may expire during an IETF-NSF restart before
the NSF restart is aborted. This
configuration has no effect if IETF NSF is
not configured.
''',
'max_interface_timer_expiry',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'nsf',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LinkGroups.LinkGroup' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LinkGroups.LinkGroup',
False,
[
_MetaInfoClassMember('link-group-name', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' Link Group Name
''',
'link_group_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Flag to indicate that linkgroup should be
running. This must be the first object
created when a linkgroup is configured, and
the last object deleted when it is
deconfigured. When this object is deleted,
the IS-IS linkgroup will be removed.
''',
'enable',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-offset', ATTRIBUTE, 'int' , None, None,
[('0', '16777215')], [],
''' Metric for redistributed routes: <0-63> for
narrow, <0-16777215> for wide
''',
'metric_offset',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('minimum-members', ATTRIBUTE, 'int' , None, None,
[('2', '64')], [],
''' Minimum Members
''',
'minimum_members',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('revert-members', ATTRIBUTE, 'int' , None, None,
[('2', '64')], [],
''' Revert Members
''',
'revert_members',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'link-group',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LinkGroups' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LinkGroups',
False,
[
_MetaInfoClassMember('link-group', REFERENCE_LIST, 'LinkGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LinkGroups.LinkGroup',
[], [],
''' Configuration for link group name
''',
'link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'link-groups',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspCheckIntervals.LspCheckInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspCheckIntervals.LspCheckInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('10', '65535')], [],
''' LSP checksum check interval time in seconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-check-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspCheckIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspCheckIntervals',
False,
[
_MetaInfoClassMember('lsp-check-interval', REFERENCE_LIST, 'LspCheckInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspCheckIntervals.LspCheckInterval',
[], [],
''' LSP checksum check interval parameters
''',
'lsp_check_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-check-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspPasswords.LspPassword' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspPasswords.LspPassword',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('algorithm', REFERENCE_ENUM_CLASS, 'IsisAuthenticationAlgorithmEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAuthenticationAlgorithmEnum',
[], [],
''' Algorithm
''',
'algorithm',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('authentication-type', REFERENCE_ENUM_CLASS, 'IsisSnpAuthEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisSnpAuthEnum',
[], [],
''' SNP packet authentication mode
''',
'authentication_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('failure-mode', REFERENCE_ENUM_CLASS, 'IsisAuthenticationFailureModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAuthenticationFailureModeEnum',
[], [],
''' Failure Mode
''',
'failure_mode',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('password', ATTRIBUTE, 'str' , None, None,
[], ['(!.+)|([^!].+)'],
''' Password or unencrypted Key Chain name
''',
'password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-password',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.LspPasswords' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspPasswords',
False,
[
_MetaInfoClassMember('lsp-password', REFERENCE_LIST, 'LspPassword' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspPasswords.LspPassword',
[], [],
''' LSP/SNP passwords. This must exist if an
LSPAcceptPassword of the same level exists.
''',
'lsp_password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-passwords',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
    # List entry keyed by 'net-name': an OSI Network Entity Title in dotted-hex
    # notation. The pattern admits 2 hex digits, 3-9 groups of 4 hex digits,
    # then a final 2-hex-digit group (presumably area / system ID / NSEL —
    # TODO confirm against the CLNS addressing spec).
    'Isis.Instances.Instance.Nets.Net' : {
        'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Nets.Net',
            False,
            [
            _MetaInfoClassMember('net-name', ATTRIBUTE, 'str' , None, None,
                [], ['[a-fA-F0-9]{2}(\\.[a-fA-F0-9]{4}){3,9}\\.[a-fA-F0-9]{2}'],
                '''                Network Entity Title
                ''',
                'net_name',
                'Cisco-IOS-XR-clns-isis-cfg', True),
            ],
            'Cisco-IOS-XR-clns-isis-cfg',
            'net',
            _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
            'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
        ),
    },
'Isis.Instances.Instance.Nets' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Nets',
False,
[
_MetaInfoClassMember('net', REFERENCE_LIST, 'Net' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Nets.Net',
[], [],
''' Network Entity Title (NET)
''',
'net',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'nets',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'lsp-lifetime' list entry, keyed by 'level';
# 'lifetime' is restricted to 1..65535 seconds.
'Isis.Instances.Instance.LspLifetimes.LspLifetime' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspLifetimes.LspLifetime',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('lifetime', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Seconds
''',
'lifetime',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-lifetime',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'lsp-lifetimes' container wrapping the list above.
'Isis.Instances.Instance.LspLifetimes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.LspLifetimes',
False,
[
_MetaInfoClassMember('lsp-lifetime', REFERENCE_LIST, 'LspLifetime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspLifetimes.LspLifetime',
[], [],
''' Maximum LSP lifetime
''',
'lsp_lifetime',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-lifetimes',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'overload-bit' list entry, keyed by 'level'.
# 'hippity-period' (5..86400 s) bounds the post-startup overload window.
'Isis.Instances.Instance.OverloadBits.OverloadBit' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.OverloadBits.OverloadBit',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('external-adv-type', REFERENCE_ENUM_CLASS, 'IsisAdvTypeExternalEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAdvTypeExternalEnum',
[], [],
''' Advertise prefixes from other protocols
''',
'external_adv_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hippity-period', ATTRIBUTE, 'int' , None, None,
[('5', '86400')], [],
''' Time in seconds to advertise ourself as
overloaded after process startup
''',
'hippity_period',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('inter-level-adv-type', REFERENCE_ENUM_CLASS, 'IsisAdvTypeInterLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAdvTypeInterLevelEnum',
[], [],
''' Advertise prefixes across ISIS levels
''',
'inter_level_adv_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('overload-bit-mode', REFERENCE_ENUM_CLASS, 'IsisOverloadBitModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisOverloadBitModeEnum',
[], [],
''' Circumstances under which the overload bit
is set in the system LSP
''',
'overload_bit_mode',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'overload-bit',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'overload-bits' container wrapping the list above.
'Isis.Instances.Instance.OverloadBits' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.OverloadBits',
False,
[
_MetaInfoClassMember('overload-bit', REFERENCE_LIST, 'OverloadBit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.OverloadBits.OverloadBit',
[], [],
''' Set the overload bit in the System LSP so
that other routers avoid this one in SPF
calculations. This may be done either
unconditionally, or on startup until either a
set time has passed or IS-IS is informed that
BGP has converged. This is an Object with a
union discriminated on an integer value of
the ISISOverloadBitModeType.
''',
'overload_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'overload-bits',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'lsp-retransmit-throttle-interval' list entry,
# keyed by 'level'; 'interval' is in milliseconds (0..65535).
'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals.LspRetransmitThrottleInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals.LspRetransmitThrottleInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Milliseconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-retransmit-throttle-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals',
False,
[
_MetaInfoClassMember('lsp-retransmit-throttle-interval', REFERENCE_LIST, 'LspRetransmitThrottleInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals.LspRetransmitThrottleInterval',
[], [],
''' Minimum interval betwen retransissions of
different LSPs
''',
'lsp_retransmit_throttle_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-retransmit-throttle-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'lsp-retransmit-interval' list entry, keyed by 'level';
# 'interval' is in seconds (0..65535).
'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals.LspRetransmitInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals.LspRetransmitInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Seconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-retransmit-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'lsp-retransmit-intervals' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals',
False,
[
_MetaInfoClassMember('lsp-retransmit-interval', REFERENCE_LIST, 'LspRetransmitInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals.LspRetransmitInterval',
[], [],
''' Interval between retransmissions of the
same LSP
''',
'lsp_retransmit_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-retransmit-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the per-interface 'bfd' container: detection multiplier
# (2..50), per-AF enable flags, and hello interval (3..30000 ms).
'Isis.Instances.Instance.Interfaces.Interface.Bfd' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.Bfd',
False,
[
_MetaInfoClassMember('detection-multiplier', ATTRIBUTE, 'int' , None, None,
[('2', '50')], [],
''' Detection multiplier for BFD sessions
created by isis
''',
'detection_multiplier',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('enable-ipv4', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE to enable BFD. FALSE to disable and to
prevent inheritance from a parent
''',
'enable_ipv4',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('enable-ipv6', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE to enable BFD. FALSE to disable and to
prevent inheritance from a parent
''',
'enable_ipv6',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('3', '30000')], [],
''' Hello interval for BFD sessions created by
isis
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'bfd',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one DIS-election 'priority' list entry, keyed by 'level';
# 'priority-value' ranges 0..127.
'Isis.Instances.Instance.Interfaces.Interface.Priorities.Priority' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.Priorities.Priority',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('priority-value', ATTRIBUTE, 'int' , None, None,
[('0', '127')], [],
''' Priority
''',
'priority_value',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priority',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'priorities' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.Priorities' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.Priorities',
False,
[
_MetaInfoClassMember('priority', REFERENCE_LIST, 'Priority' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.Priorities.Priority',
[], [],
''' DIS-election priority
''',
'priority',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'priorities',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'hello-accept-password' list entry, keyed by 'level';
# 'password' must match the pattern below (leading '!' marks encrypted form).
'Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords.HelloAcceptPassword' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords.HelloAcceptPassword',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('password', ATTRIBUTE, 'str' , None, None,
[], ['(!.+)|([^!].+)'],
''' Password
''',
'password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-accept-password',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'hello-accept-passwords' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords',
False,
[
_MetaInfoClassMember('hello-accept-password', REFERENCE_LIST, 'HelloAcceptPassword' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords.HelloAcceptPassword',
[], [],
''' IIH accept passwords. This requires the
existence of a HelloPassword of the same
level.
''',
'hello_accept_password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-accept-passwords',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'hello-password' list entry, keyed by 'level';
# carries the hash algorithm, failure mode and password/key-chain name.
'Isis.Instances.Instance.Interfaces.Interface.HelloPasswords.HelloPassword' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloPasswords.HelloPassword',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('algorithm', REFERENCE_ENUM_CLASS, 'IsisAuthenticationAlgorithmEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAuthenticationAlgorithmEnum',
[], [],
''' Algorithm
''',
'algorithm',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('failure-mode', REFERENCE_ENUM_CLASS, 'IsisAuthenticationFailureModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAuthenticationFailureModeEnum',
[], [],
''' Failure Mode
''',
'failure_mode',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('password', ATTRIBUTE, 'str' , None, None,
[], ['(!.+)|([^!].+)'],
''' Password or unencrypted Key Chain name
''',
'password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-password',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'hello-passwords' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.HelloPasswords' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloPasswords',
False,
[
_MetaInfoClassMember('hello-password', REFERENCE_LIST, 'HelloPassword' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloPasswords.HelloPassword',
[], [],
''' IIH passwords. This must exist if a
HelloAcceptPassword of the same level
exists.
''',
'hello_password',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-passwords',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'hello-padding' list entry, keyed by 'level'.
'Isis.Instances.Instance.Interfaces.Interface.HelloPaddings.HelloPadding' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloPaddings.HelloPadding',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('padding-type', REFERENCE_ENUM_CLASS, 'IsisHelloPaddingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisHelloPaddingEnum',
[], [],
''' Hello padding type value
''',
'padding_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-padding',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'hello-paddings' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.HelloPaddings' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloPaddings',
False,
[
_MetaInfoClassMember('hello-padding', REFERENCE_LIST, 'HelloPadding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloPaddings.HelloPadding',
[], [],
''' Pad IIHs to the interface MTU
''',
'hello_padding',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-paddings',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'hello-multiplier' list entry, keyed by 'level';
# 'multiplier' ranges 3..1000.
'Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers.HelloMultiplier' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers.HelloMultiplier',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('multiplier', ATTRIBUTE, 'int' , None, None,
[('3', '1000')], [],
''' Hello multiplier value
''',
'multiplier',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-multiplier',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'hello-multipliers' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers',
False,
[
_MetaInfoClassMember('hello-multiplier', REFERENCE_LIST, 'HelloMultiplier' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers.HelloMultiplier',
[], [],
''' Hello-multiplier configuration. The number
of successive IIHs that may be missed on an
adjacency before it is considered down.
''',
'hello_multiplier',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-multipliers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'lsp-fast-flood-threshold' list entry, keyed by 'level';
# 'count' covers the full uint32 range from 1.
'Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds.LspFastFloodThreshold' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds.LspFastFloodThreshold',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('count', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Count
''',
'count',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-fast-flood-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'lsp-fast-flood-thresholds' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds',
False,
[
_MetaInfoClassMember('lsp-fast-flood-threshold', REFERENCE_LIST, 'LspFastFloodThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds.LspFastFloodThreshold',
[], [],
''' Number of LSPs to send back to back on an
interface.
''',
'lsp_fast_flood_threshold',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-fast-flood-thresholds',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'hello-interval' list entry, keyed by 'level';
# 'interval' is in seconds (1..65535).
'Isis.Instances.Instance.Interfaces.Interface.HelloIntervals.HelloInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloIntervals.HelloInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Seconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.HelloIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.HelloIntervals',
False,
[
_MetaInfoClassMember('hello-interval', REFERENCE_LIST, 'HelloInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloIntervals.HelloInterval',
[], [],
''' Hello-interval configuration. The interval
at which IIH packets will be sent. This
will be three times quicker on a LAN
interface which has been electted DIS.
''',
'hello_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'hello-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the per-AF 'prefix-sid' container: SID type/value plus the
# explicit-null, N-flag-clear and PHP flags; 'value' ranges 0..1048575
# (20-bit label space).
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid',
False,
[
_MetaInfoClassMember('explicit-null', REFERENCE_ENUM_CLASS, 'IsisexplicitNullFlagEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisexplicitNullFlagEnum',
[], [],
''' Enable/Disable Explicit-NULL flag
''',
'explicit_null',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('nflag-clear', REFERENCE_ENUM_CLASS, 'NflagClearEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'NflagClearEnum',
[], [],
''' Clear N-flag for the prefix-SID
''',
'nflag_clear',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('php', REFERENCE_ENUM_CLASS, 'IsisphpFlagEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisphpFlagEnum',
[], [],
''' Enable/Disable Penultimate Hop Popping
''',
'php',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsissidEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsissidEnum',
[], [],
''' SID type for the interface
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '1048575')], [],
''' SID value for the interface
''',
'value',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'prefix-sid',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'frrlfa-candidate-interface' list entry; double-keyed by
# 'frr-type' and 'interface-name' (both marked True); the pattern constrains
# interface names to IOS XR naming formats.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrlfa-candidate-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'frrlfa-candidate-interfaces' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces',
False,
[
_MetaInfoClassMember('frrlfa-candidate-interface', REFERENCE_LIST, 'FrrlfaCandidateInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface',
[], [],
''' Include an interface to LFA candidate
in computation
''',
'frrlfa_candidate_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrlfa-candidate-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'frr-remote-lfa-max-metric' list entry, keyed by 'level';
# 'max-metric' ranges 1..16777215 (24-bit wide metric).
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('max-metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Value of the metric
''',
'max_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-max-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'frr-remote-lfa-max-metrics' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics',
False,
[
_MetaInfoClassMember('frr-remote-lfa-max-metric', REFERENCE_LIST, 'FrrRemoteLfaMaxMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric',
[], [],
''' Configure the maximum metric for
selecting a remote LFA node
''',
'frr_remote_lfa_max_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-max-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'frr-type' list entry, keyed by 'level'.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes.FrrType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes.FrrType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'frr-types' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_LIST, 'FrrType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes.FrrType',
[], [],
''' Type of computation for prefixes
reachable via interface
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'frr-remote-lfa-type' list entry, keyed by 'level'.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsisRemoteLfaEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisRemoteLfaEnum',
[], [],
''' Remote LFA Type
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'frr-remote-lfa-types' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes',
False,
[
_MetaInfoClassMember('frr-remote-lfa-type', REFERENCE_LIST, 'FrrRemoteLfaType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType',
[], [],
''' Enable remote lfa for a particular
level
''',
'frr_remote_lfa_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'interface-frr-tiebreaker-default' list entry;
# 'level' is the sole (key) member.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker-default',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'interface-frr-tiebreaker-defaults' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults',
False,
[
_MetaInfoClassMember('interface-frr-tiebreaker-default', REFERENCE_LIST, 'InterfaceFrrTiebreakerDefault' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault',
[], [],
''' Configure default tiebreaker
''',
'interface_frr_tiebreaker_default',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker-defaults',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for one 'frrtilfa-type' list entry; 'level' is the sole (key) member.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrtilfa-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
# Meta-info for the 'frrtilfa-types' container wrapping the list above.
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes',
False,
[
_MetaInfoClassMember('frrtilfa-type', REFERENCE_LIST, 'FrrtilfaType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType',
[], [],
''' Enable TI lfa for a particular level
''',
'frrtilfa_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrtilfa-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-exclude-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces',
False,
[
_MetaInfoClassMember('frr-exclude-interface', REFERENCE_LIST, 'FrrExcludeInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface',
[], [],
''' Exclude an interface from computation
''',
'frr_exclude_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-exclude-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('tiebreaker', REFERENCE_ENUM_CLASS, 'IsisInterfaceFrrTiebreakerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceFrrTiebreakerEnum',
[], [],
''' Tiebreaker for which configuration
applies
''',
'tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Preference order among tiebreakers
''',
'index',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers',
False,
[
_MetaInfoClassMember('interface-frr-tiebreaker', REFERENCE_LIST, 'InterfaceFrrTiebreaker' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
[], [],
''' Configure tiebreaker for multiple
backups
''',
'interface_frr_tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreakers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable',
False,
[
_MetaInfoClassMember('frr-exclude-interfaces', REFERENCE_CLASS, 'FrrExcludeInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces',
[], [],
''' FRR exclusion configuration
''',
'frr_exclude_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-max-metrics', REFERENCE_CLASS, 'FrrRemoteLfaMaxMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics',
[], [],
''' Remote LFA maxmimum metric
''',
'frr_remote_lfa_max_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-types', REFERENCE_CLASS, 'FrrRemoteLfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes',
[], [],
''' Remote LFA Enable
''',
'frr_remote_lfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-types', REFERENCE_CLASS, 'FrrTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes',
[], [],
''' Type of FRR computation per level
''',
'frr_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrlfa-candidate-interfaces', REFERENCE_CLASS, 'FrrlfaCandidateInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces',
[], [],
''' FRR LFA candidate configuration
''',
'frrlfa_candidate_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrtilfa-types', REFERENCE_CLASS, 'FrrtilfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes',
[], [],
''' TI LFA Enable
''',
'frrtilfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreaker-defaults', REFERENCE_CLASS, 'InterfaceFrrTiebreakerDefaults' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults',
[], [],
''' Interface FRR Default tiebreaker
configuration
''',
'interface_frr_tiebreaker_defaults',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreakers', REFERENCE_CLASS, 'InterfaceFrrTiebreakers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers',
[], [],
''' Interface FRR tiebreakers configuration
''',
'interface_frr_tiebreakers',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-table',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp',
False,
[
_MetaInfoClassMember('sync-level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Enable MPLS LDP Synchronization for an
IS-IS level
''',
'sync_level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls-ldp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics.AutoMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics.AutoMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('proactive-protect', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Allowed auto metric:<1-63> for narrow
,<1-16777214> for wide
''',
'proactive_protect',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics',
False,
[
_MetaInfoClassMember('auto-metric', REFERENCE_LIST, 'AutoMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics.AutoMetric',
[], [],
''' AutoMetric Proactive-Protect
configuration. Legal value depends on
the metric-style specified for the
topology. If the metric-style defined is
narrow, then only a value between <1-63>
is allowed and if the metric-style is
defined as wide, then a value between
<1-16777214> is allowed as the
auto-metric value.
''',
'auto_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags.AdminTag' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags.AdminTag',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('admin-tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Tag to associate with connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tag',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags',
False,
[
_MetaInfoClassMember('admin-tag', REFERENCE_LIST, 'AdminTag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags.AdminTag',
[], [],
''' Admin tag for advertised interface
connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tags',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup',
False,
[
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level in which link group will be
effective
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('link-group', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' Link Group
''',
'link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-link-group',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'maximum':'maximum',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights.Weight' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights.Weight',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
# NOTE(review): the enforced range below is ('1', '16777214') while the
# description text advertises <1-16777215>. This mismatch is carried over
# from the upstream Cisco-IOS-XR-clns-isis-cfg YANG model (generated
# code must mirror the model), so the data is left as generated — verify
# the intended upper bound against the YANG source before relying on it.
_MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Weight to be configured under interface for
Load Balancing. Allowed weight: <1-16777215>
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weight',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData',
False,
[
_MetaInfoClassMember('admin-tags', REFERENCE_CLASS, 'AdminTags' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags',
[], [],
''' admin-tag configuration
''',
'admin_tags',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('auto-metrics', REFERENCE_CLASS, 'AutoMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics',
[], [],
''' AutoMetric configuration
''',
'auto_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-af-state', REFERENCE_ENUM_CLASS, 'IsisInterfaceAfStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceAfStateEnum',
[], [],
''' Interface state
''',
'interface_af_state',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-table', REFERENCE_CLASS, 'InterfaceFrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'interface_frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-link-group', REFERENCE_CLASS, 'InterfaceLinkGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup',
[], [],
''' Provide link group name and level
''',
'interface_link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp', REFERENCE_CLASS, 'MplsLdp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp',
[], [],
''' MPLS LDP configuration
''',
'mpls_ldp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-sid', REFERENCE_CLASS, 'PrefixSid' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid',
[], [],
''' Assign prefix SID to an interface,
ISISPHPFlag will be rejected if set to
disable, ISISEXPLICITNULLFlag will
override the value of ISISPHPFlag
''',
'prefix_sid',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('running', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' The presence of this object allows an
address-family to be run over the
interface in question.This must be the
first object created under the
InterfaceAddressFamily container, and the
last one deleted
''',
'running',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights',
[], [],
''' Weight configuration
''',
'weights',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-af-data',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid',
False,
[
_MetaInfoClassMember('explicit-null', REFERENCE_ENUM_CLASS, 'IsisexplicitNullFlagEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisexplicitNullFlagEnum',
[], [],
''' Enable/Disable Explicit-NULL flag
''',
'explicit_null',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('nflag-clear', REFERENCE_ENUM_CLASS, 'NflagClearEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'NflagClearEnum',
[], [],
''' Clear N-flag for the prefix-SID
''',
'nflag_clear',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('php', REFERENCE_ENUM_CLASS, 'IsisphpFlagEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisphpFlagEnum',
[], [],
''' Enable/Disable Penultimate Hop Popping
''',
'php',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsissidEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsissidEnum',
[], [],
''' SID type for the interface
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '1048575')], [],
''' SID value for the interface
''',
'value',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'prefix-sid',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrlfa-candidate-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces',
False,
[
_MetaInfoClassMember('frrlfa-candidate-interface', REFERENCE_LIST, 'FrrlfaCandidateInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface',
[], [],
''' Include an interface to LFA candidate
in computation
''',
'frrlfa_candidate_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrlfa-candidate-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('max-metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Value of the metric
''',
'max_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-max-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics',
False,
[
_MetaInfoClassMember('frr-remote-lfa-max-metric', REFERENCE_LIST, 'FrrRemoteLfaMaxMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric',
[], [],
''' Configure the maximum metric for
selecting a remote LFA node
''',
'frr_remote_lfa_max_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-max-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_LIST, 'FrrType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType',
[], [],
''' Type of computation for prefixes
reachable via interface
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'IsisRemoteLfaEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisRemoteLfaEnum',
[], [],
''' Remote LFA Type
''',
'type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes',
False,
[
_MetaInfoClassMember('frr-remote-lfa-type', REFERENCE_LIST, 'FrrRemoteLfaType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType',
[], [],
''' Enable remote lfa for a particular
level
''',
'frr_remote_lfa_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-remote-lfa-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker-default',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults',
False,
[
_MetaInfoClassMember('interface-frr-tiebreaker-default', REFERENCE_LIST, 'InterfaceFrrTiebreakerDefault' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault',
[], [],
''' Configure default tiebreaker
''',
'interface_frr_tiebreaker_default',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker-defaults',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrtilfa-type',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes',
False,
[
_MetaInfoClassMember('frrtilfa-type', REFERENCE_LIST, 'FrrtilfaType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType',
[], [],
''' Enable TI lfa for a particular level
''',
'frrtilfa_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frrtilfa-types',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface',
False,
[
_MetaInfoClassMember('frr-type', REFERENCE_ENUM_CLASS, 'IsisfrrEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisfrrEnum',
[], [],
''' Computation Type
''',
'frr_type',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-exclude-interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces',
False,
[
_MetaInfoClassMember('frr-exclude-interface', REFERENCE_LIST, 'FrrExcludeInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface',
[], [],
''' Exclude an interface from computation
''',
'frr_exclude_interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-exclude-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('tiebreaker', REFERENCE_ENUM_CLASS, 'IsisInterfaceFrrTiebreakerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceFrrTiebreakerEnum',
[], [],
''' Tiebreaker for which configuration
applies
''',
'tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Preference order among tiebreakers
''',
'index',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers',
False,
[
_MetaInfoClassMember('interface-frr-tiebreaker', REFERENCE_LIST, 'InterfaceFrrTiebreaker' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
[], [],
''' Configure tiebreaker for multiple
backups
''',
'interface_frr_tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreakers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable',
False,
[
_MetaInfoClassMember('frr-exclude-interfaces', REFERENCE_CLASS, 'FrrExcludeInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces',
[], [],
''' FRR exclusion configuration
''',
'frr_exclude_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-max-metrics', REFERENCE_CLASS, 'FrrRemoteLfaMaxMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics',
[], [],
''' Remote LFA maxmimum metric
''',
'frr_remote_lfa_max_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-types', REFERENCE_CLASS, 'FrrRemoteLfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes',
[], [],
''' Remote LFA Enable
''',
'frr_remote_lfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-types', REFERENCE_CLASS, 'FrrTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes',
[], [],
''' Type of FRR computation per level
''',
'frr_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrlfa-candidate-interfaces', REFERENCE_CLASS, 'FrrlfaCandidateInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces',
[], [],
''' FRR LFA candidate configuration
''',
'frrlfa_candidate_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrtilfa-types', REFERENCE_CLASS, 'FrrtilfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes',
[], [],
''' TI LFA Enable
''',
'frrtilfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreaker-defaults', REFERENCE_CLASS, 'InterfaceFrrTiebreakerDefaults' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults',
[], [],
''' Interface FRR Default tiebreaker
configuration
''',
'interface_frr_tiebreaker_defaults',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreakers', REFERENCE_CLASS, 'InterfaceFrrTiebreakers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers',
[], [],
''' Interface FRR tiebreakers configuration
''',
'interface_frr_tiebreakers',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-table',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp',
False,
[
_MetaInfoClassMember('sync-level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Enable MPLS LDP Synchronization for an
IS-IS level
''',
'sync_level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls-ldp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('proactive-protect', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Allowed auto metric:<1-63> for narrow
,<1-16777214> for wide
''',
'proactive_protect',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics',
False,
[
_MetaInfoClassMember('auto-metric', REFERENCE_LIST, 'AutoMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric',
[], [],
''' AutoMetric Proactive-Protect
configuration. Legal value depends on
the metric-style specified for the
topology. If the metric-style defined is
narrow, then only a value between <1-63>
is allowed and if the metric-style is
defined as wide, then a value between
<1-16777214> is allowed as the
auto-metric value.
''',
'auto_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('admin-tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Tag to associate with connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tag',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags',
False,
[
_MetaInfoClassMember('admin-tag', REFERENCE_LIST, 'AdminTag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag',
[], [],
''' Admin tag for advertised interface
connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tags',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup',
False,
[
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level in which link group will be
effective
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('link-group', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' Link Group
''',
'link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-link-group',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'maximum':'maximum',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Weight to be configured under interface for
Load Balancing. Allowed weight: <1-16777215>
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weight',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName',
False,
[
_MetaInfoClassMember('topology-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Topology Name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('admin-tags', REFERENCE_CLASS, 'AdminTags' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags',
[], [],
''' admin-tag configuration
''',
'admin_tags',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('auto-metrics', REFERENCE_CLASS, 'AutoMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics',
[], [],
''' AutoMetric configuration
''',
'auto_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-af-state', REFERENCE_ENUM_CLASS, 'IsisInterfaceAfStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceAfStateEnum',
[], [],
''' Interface state
''',
'interface_af_state',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-table', REFERENCE_CLASS, 'InterfaceFrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'interface_frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-link-group', REFERENCE_CLASS, 'InterfaceLinkGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup',
[], [],
''' Provide link group name and level
''',
'interface_link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp', REFERENCE_CLASS, 'MplsLdp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp',
[], [],
''' MPLS LDP configuration
''',
'mpls_ldp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-sid', REFERENCE_CLASS, 'PrefixSid' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid',
[], [],
''' Assign prefix SID to an interface,
ISISPHPFlag will be rejected if set to
disable, ISISEXPLICITNULLFlag will
override the value of ISISPHPFlag
''',
'prefix_sid',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('running', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' The presence of this object allows an
address-family to be run over the
interface in question.This must be the
first object created under the
InterfaceAddressFamily container, and the
last one deleted
''',
'running',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights',
[], [],
''' Weight configuration
''',
'weights',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'topology-name',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'IsisAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisAddressFamilyEnum',
[], [],
''' Address family
''',
'af_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'IsisSubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisSubAddressFamilyEnum',
[], [],
''' Sub address family
''',
'saf_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-af-data', REFERENCE_CLASS, 'InterfaceAfData' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData',
[], [],
''' Data container.
''',
'interface_af_data',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-name', REFERENCE_LIST, 'TopologyName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName',
[], [],
''' keys: topology-name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-af',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs',
False,
[
_MetaInfoClassMember('interface-af', REFERENCE_LIST, 'InterfaceAf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf',
[], [],
''' Configuration for an IS-IS address-family
on a single interface. If a named
(non-default) topology is being created it
must be multicast. Also the topology ID
mustbe set first and delete last in the
router configuration.
''',
'interface_af',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-afs',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Seconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'csnp-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals',
False,
[
_MetaInfoClassMember('csnp-interval', REFERENCE_LIST, 'CsnpInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval',
[], [],
''' CSNP-interval configuration. No fixed
default value as this depends on the media
type of the interface.
''',
'csnp_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'csnp-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Milliseconds
''',
'interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-interval',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.LspIntervals' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.LspIntervals',
False,
[
_MetaInfoClassMember('lsp-interval', REFERENCE_LIST, 'LspInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval',
[], [],
''' Interval between transmission of LSPs on
interface.
''',
'lsp_interval',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'lsp-intervals',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.MeshGroupEnum' : _MetaInfoEnum('MeshGroupEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'blocked':'blocked',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('bfd', REFERENCE_CLASS, 'Bfd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.Bfd',
[], [],
''' BFD configuration
''',
'bfd',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('circuit-type', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Configure circuit type for interface
''',
'circuit_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('csnp-intervals', REFERENCE_CLASS, 'CsnpIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals',
[], [],
''' CSNP-interval configuration
''',
'csnp_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello-accept-passwords', REFERENCE_CLASS, 'HelloAcceptPasswords' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords',
[], [],
''' IIH accept password configuration
''',
'hello_accept_passwords',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello-intervals', REFERENCE_CLASS, 'HelloIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloIntervals',
[], [],
''' Hello-interval configuration
''',
'hello_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello-multipliers', REFERENCE_CLASS, 'HelloMultipliers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers',
[], [],
''' Hello-multiplier configuration
''',
'hello_multipliers',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello-paddings', REFERENCE_CLASS, 'HelloPaddings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloPaddings',
[], [],
''' Hello-padding configuration
''',
'hello_paddings',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('hello-passwords', REFERENCE_CLASS, 'HelloPasswords' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.HelloPasswords',
[], [],
''' IIH password configuration
''',
'hello_passwords',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-afs', REFERENCE_CLASS, 'InterfaceAfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs',
[], [],
''' Per-interface address-family configuration
''',
'interface_afs',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('link-down-fast-detect', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Configure high priority detection of
interface down event
''',
'link_down_fast_detect',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-fast-flood-thresholds', REFERENCE_CLASS, 'LspFastFloodThresholds' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds',
[], [],
''' LSP fast flood threshold configuration
''',
'lsp_fast_flood_thresholds',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-intervals', REFERENCE_CLASS, 'LspIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspIntervals',
[], [],
''' LSP-interval configuration
''',
'lsp_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-retransmit-intervals', REFERENCE_CLASS, 'LspRetransmitIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals',
[], [],
''' LSP-retransmission-interval configuration
''',
'lsp_retransmit_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-retransmit-throttle-intervals', REFERENCE_CLASS, 'LspRetransmitThrottleIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals',
[], [],
''' LSP-retransmission-throttle-interval
configuration
''',
'lsp_retransmit_throttle_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mesh-group', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Mesh-group configuration
''',
'mesh_group',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('mesh-group', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.MeshGroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.MeshGroupEnum',
[], [],
''' Mesh-group configuration
''',
'mesh_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mesh-group', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Mesh-group configuration
''',
'mesh_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
_MetaInfoClassMember('point-to-point', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' IS-IS will attempt to form point-to-point
over LAN adjacencies over this interface.
''',
'point_to_point',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('priorities', REFERENCE_CLASS, 'Priorities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.Priorities',
[], [],
''' DIS-election priority configuration
''',
'priorities',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('running', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' This object must be set before any other
configuration is supplied for an interface,
and must be the last per-interface
configuration object to be removed.
''',
'running',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IsisInterfaceStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceStateEnum',
[], [],
''' Enable/Disable routing
''',
'state',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface',
[], [],
''' Configuration for an IS-IS interface
''',
'interface',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance',
False,
[
_MetaInfoClassMember('instance-name', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' Instance identifier
''',
'instance_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('afs', REFERENCE_CLASS, 'Afs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs',
[], [],
''' Per-address-family configuration
''',
'afs',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('distribute', REFERENCE_CLASS, 'Distribute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Distribute',
[], [],
''' IS-IS Distribute BGP-LS configuration
''',
'distribute',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('dynamic-host-name', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, dynamic hostname resolution is
disabled, and system IDs will always be
displayed by show and debug output.
''',
'dynamic_host_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ignore-lsp-errors', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, LSPs recieved with bad checksums will
result in the purging of that LSP from the LSP
DB. If FALSE or not set, the received LSP will
just be ignored.
''',
'ignore_lsp_errors',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('instance-id', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Instance ID of the IS-IS process
''',
'instance_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces',
[], [],
''' Per-interface configuration
''',
'interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('is-type', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' IS type of the IS-IS process
''',
'is_type',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('link-groups', REFERENCE_CLASS, 'LinkGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LinkGroups',
[], [],
''' Link Group
''',
'link_groups',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('log-adjacency-changes', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Log changes in adjacency state
''',
'log_adjacency_changes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('log-pdu-drops', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Log PDU drops
''',
'log_pdu_drops',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-accept-passwords', REFERENCE_CLASS, 'LspAcceptPasswords' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspAcceptPasswords',
[], [],
''' LSP/SNP accept password configuration
''',
'lsp_accept_passwords',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-arrival-times', REFERENCE_CLASS, 'LspArrivalTimes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspArrivalTimes',
[], [],
''' LSP arrival time configuration
''',
'lsp_arrival_times',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-check-intervals', REFERENCE_CLASS, 'LspCheckIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspCheckIntervals',
[], [],
''' LSP checksum check interval configuration
''',
'lsp_check_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-generation-intervals', REFERENCE_CLASS, 'LspGenerationIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspGenerationIntervals',
[], [],
''' LSP generation-interval configuration
''',
'lsp_generation_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-lifetimes', REFERENCE_CLASS, 'LspLifetimes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspLifetimes',
[], [],
''' LSP lifetime configuration
''',
'lsp_lifetimes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-mtus', REFERENCE_CLASS, 'LspMtus' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspMtus',
[], [],
''' LSP MTU configuration
''',
'lsp_mtus',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-passwords', REFERENCE_CLASS, 'LspPasswords' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspPasswords',
[], [],
''' LSP/SNP password configuration
''',
'lsp_passwords',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('lsp-refresh-intervals', REFERENCE_CLASS, 'LspRefreshIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.LspRefreshIntervals',
[], [],
''' LSP refresh-interval configuration
''',
'lsp_refresh_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('max-link-metrics', REFERENCE_CLASS, 'MaxLinkMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.MaxLinkMetrics',
[], [],
''' Max Link Metric configuration
''',
'max_link_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('nets', REFERENCE_CLASS, 'Nets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Nets',
[], [],
''' NET configuration
''',
'nets',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('nsf', REFERENCE_CLASS, 'Nsf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Nsf',
[], [],
''' IS-IS NSF configuration
''',
'nsf',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('nsr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' IS-IS NSR configuration
''',
'nsr',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('overload-bits', REFERENCE_CLASS, 'OverloadBits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.OverloadBits',
[], [],
''' LSP overload-bit configuration
''',
'overload_bits',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('running', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Flag to indicate that instance should be
running. This must be the first object
created when an IS-IS instance is configured,
and the last object deleted when it is
deconfigured. When this object is deleted,
the IS-IS instance will exit.
''',
'running',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('srgb', REFERENCE_CLASS, 'Srgb' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Srgb',
[], [],
''' Segment Routing Global Block configuration
''',
'srgb',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('trace-buffer-size', REFERENCE_CLASS, 'TraceBufferSize' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.TraceBufferSize',
[], [],
''' Trace buffer size configuration
''',
'trace_buffer_size',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('tracing-mode', REFERENCE_ENUM_CLASS, 'IsisTracingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisTracingModeEnum',
[], [],
''' Tracing mode configuration
''',
'tracing_mode',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances' : {
'meta_info' : _MetaInfoClass('Isis.Instances',
False,
[
_MetaInfoClassMember('instance', REFERENCE_LIST, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance',
[], [],
''' Configuration for a single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'instances',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis' : {
'meta_info' : _MetaInfoClass('Isis',
False,
[
_MetaInfoClassMember('instances', REFERENCE_CLASS, 'Instances' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances',
[], [],
''' IS-IS instance configuration
''',
'instances',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'isis',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
}
# Link every nested meta_info class to its enclosing container by setting its
# ``parent`` pointer.  In this generated meta table each child entry's key is
# its parent's key plus exactly one trailing dotted component, so the parent
# key is recovered by stripping the last component from the child key.
for _child_key in (
    'Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval',
    'Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime',
    'Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric',
    'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap',
    'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers.FrrTiebreaker',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities.SpfPrefixPriority',
    'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes.SummaryPrefix',
    'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces.ExcludeInterface',
    'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable',
    'Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces',
    'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes.MaxRedistPrefix',
    'Isis.Instances.Instance.Afs.Af.AfData.Propagations.Propagation',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Bgp',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Eigrp',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals.SpfPeriodicInterval',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals.SpfInterval',
    'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances.AdminDistance',
    'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States.State',
    'Isis.Instances.Instance.Afs.Af.AfData.Ispf.States',
    'Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId',
    'Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric',
    'Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight',
    'Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting',
    'Isis.Instances.Instance.Afs.Af.AfData.MetricStyles',
    'Isis.Instances.Instance.Afs.Af.AfData.FrrTable',
    'Isis.Instances.Instance.Afs.Af.AfData.RouterId',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities',
    'Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes',
    'Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance',
    'Isis.Instances.Instance.Afs.Af.AfData.Ucmp',
    'Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes',
    'Isis.Instances.Instance.Afs.Af.AfData.Propagations',
    'Isis.Instances.Instance.Afs.Af.AfData.Redistributions',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals',
    'Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals',
    'Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence',
    'Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation',
    'Isis.Instances.Instance.Afs.Af.AfData.AdminDistances',
    'Isis.Instances.Instance.Afs.Af.AfData.Ispf',
    'Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal',
    'Isis.Instances.Instance.Afs.Af.AfData.Mpls',
    'Isis.Instances.Instance.Afs.Af.AfData.Metrics',
    'Isis.Instances.Instance.Afs.Af.AfData.Weights',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers.FrrTiebreaker',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies.FrrUseCandOnly',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities.SpfPrefixPriority',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes.SummaryPrefix',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces.ExcludeInterface',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes.MaxRedistPrefix',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations.Propagation',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals.SpfPeriodicInterval',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals.SpfInterval',
    'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances.AdminDistance',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States.State',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles',
    'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable',
    'Isis.Instances.Instance.Afs.Af.TopologyName.RouterId',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals',
    'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence',
    'Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation',
    'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf',
    'Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics',
    'Isis.Instances.Instance.Afs.Af.TopologyName.Weights',
    'Isis.Instances.Instance.Afs.Af.AfData',
    'Isis.Instances.Instance.Afs.Af.TopologyName',
    'Isis.Instances.Instance.Afs.Af',
    'Isis.Instances.Instance.LspRefreshIntervals.LspRefreshInterval',
    'Isis.Instances.Instance.LspAcceptPasswords.LspAcceptPassword',
    'Isis.Instances.Instance.LspMtus.LspMtu',
    'Isis.Instances.Instance.LinkGroups.LinkGroup',
    'Isis.Instances.Instance.LspCheckIntervals.LspCheckInterval',
    'Isis.Instances.Instance.LspPasswords.LspPassword',
    'Isis.Instances.Instance.Nets.Net',
    'Isis.Instances.Instance.LspLifetimes.LspLifetime',
    'Isis.Instances.Instance.OverloadBits.OverloadBit',
    'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals.LspRetransmitThrottleInterval',
    'Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals.LspRetransmitInterval',
    'Isis.Instances.Instance.Interfaces.Interface.Priorities.Priority',
):
    # Parent key = child key minus its final '.Component' segment.
    _parent_key = _child_key.rsplit('.', 1)[0]
    _meta_table[_child_key]['meta_info'].parent = _meta_table[_parent_key]['meta_info']
# Avoid leaking loop temporaries into the module namespace.
del _child_key, _parent_key
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords.HelloAcceptPassword']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPasswords.HelloPassword']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPasswords']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPaddings.HelloPadding']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPaddings']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers.HelloMultiplier']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds.LspFastFloodThreshold']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloIntervals.HelloInterval']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloIntervals']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes.FrrType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.InterfaceFrrTiebreakers']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics.AutoMetric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags.AdminTag']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights.Weight']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults.InterfaceFrrTiebreakerDefault']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspIntervals']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.Bfd']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.Priorities']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPasswords']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPaddings']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface.LspIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info'].parent =_meta_table['Isis.Instances.Instance.Interfaces']['meta_info']
_meta_table['Isis.Instances.Instance.Srgb']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspGenerationIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspArrivalTimes']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.TraceBufferSize']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.MaxLinkMetrics']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.Afs']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspRefreshIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.Distribute']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspAcceptPasswords']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspMtus']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.Nsf']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LinkGroups']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspCheckIntervals']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspPasswords']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.Nets']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.LspLifetimes']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.OverloadBits']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance.Interfaces']['meta_info'].parent =_meta_table['Isis.Instances.Instance']['meta_info']
_meta_table['Isis.Instances.Instance']['meta_info'].parent =_meta_table['Isis.Instances']['meta_info']
_meta_table['Isis.Instances']['meta_info'].parent =_meta_table['Isis']['meta_info']
| 58.088908
| 363
| 0.581469
| 39,939
| 411,618
| 5.759934
| 0.020982
| 0.087565
| 0.109456
| 0.112464
| 0.950258
| 0.945967
| 0.942855
| 0.929214
| 0.917486
| 0.898373
| 0
| 0.007399
| 0.285544
| 411,618
| 7,085
| 364
| 58.097107
| 0.774849
| 0
| 0
| 0.615561
| 0
| 0.015758
| 0.541144
| 0.462968
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.010013
| 0.001313
| 0
| 0.001313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29a4b28a3428e8a1d9d8be56e9b6795d1756e451
| 3,455
|
py
|
Python
|
tests/processing/pipeline/test_varia.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 133
|
2018-05-18T13:54:10.000Z
|
2022-02-15T02:14:20.000Z
|
tests/processing/pipeline/test_varia.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 68
|
2018-06-03T16:42:09.000Z
|
2021-01-29T10:58:30.000Z
|
tests/processing/pipeline/test_varia.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 37
|
2018-11-02T02:40:29.000Z
|
2021-11-30T07:44:50.000Z
|
import numpy as np
from audiomate.processing import pipeline
class TestAddContext:
    """Tests for ``pipeline.AddContext`` (framewise context stacking).

    Each frame of the input is concatenated with its ``left_frames``
    predecessors and ``right_frames`` successors; positions that fall
    outside the signal are zero-padded.  The online tests additionally
    check that chunked processing matches offline processing, including
    the case where the step must buffer frames until enough right
    context has arrived.

    NOTE(review): the source of this block had its indentation stripped
    by extraction; the structure below is reconstructed from the
    def/assert layout.
    """

    def test_compute_with_left_frames(self):
        # Left context only: each frame is prefixed by the two previous
        # frames, zero-padded at the start of the signal.
        data = np.array([[1, 2], [3, 4], [5, 6]])
        chunk = pipeline.Chunk(data, offset=0, is_last=True)
        step = pipeline.AddContext(left_frames=2, right_frames=0)

        result = step.compute(chunk, 16000)

        assert np.array_equal(result, np.array([
            [0, 0, 0, 0, 1, 2],
            [0, 0, 1, 2, 3, 4],
            [1, 2, 3, 4, 5, 6]
        ]))

    def test_compute_with_right_frames(self):
        # Right context only: each frame is suffixed by the two
        # following frames, zero-padded at the end of the signal.
        data = np.array([[1, 2], [3, 4], [5, 6]])
        chunk = pipeline.Chunk(data, offset=0, is_last=True)
        step = pipeline.AddContext(left_frames=0, right_frames=2)

        result = step.compute(chunk, 16000)

        assert np.array_equal(result, np.array([
            [1, 2, 3, 4, 5, 6],
            [3, 4, 5, 6, 0, 0],
            [5, 6, 0, 0, 0, 0]
        ]))

    def test_compute_with_left_and_right_frames(self):
        # Both sides: two frames of context on each side, zero-padded
        # at both signal boundaries.
        data = np.array([[1, 2], [3, 4], [5, 6]])
        chunk = pipeline.Chunk(data, offset=0, is_last=True)
        step = pipeline.AddContext(left_frames=2, right_frames=2)

        result = step.compute(chunk, 16000)

        assert np.array_equal(result, np.array([
            [0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
            [0, 0, 1, 2, 3, 4, 5, 6, 0, 0],
            [1, 2, 3, 4, 5, 6, 0, 0, 0, 0]
        ]))

    def test_compute_online_with_left_frames(self):
        step = pipeline.AddContext(left_frames=2, right_frames=0)

        # FIRST CHUNK
        data = np.array([[1, 2], [3, 4]])
        result = step.process_frames(data, 16000, offset=0, last=False)

        assert np.array_equal(result, np.array([
            [0, 0, 0, 0, 1, 2],
            [0, 0, 1, 2, 3, 4],
        ]))

        # SECOND CHUNK
        data = np.array([[5, 6], [7, 8]])
        result = step.process_frames(data, 16000, offset=2, last=True)

        assert np.array_equal(result, np.array([
            [1, 2, 3, 4, 5, 6],
            [3, 4, 5, 6, 7, 8]
        ]))

    def test_compute_online_with_right_frames(self):
        step = pipeline.AddContext(left_frames=0, right_frames=2)

        # FIRST CHUNK
        data = np.array([[1, 2], [3, 4]])
        result = step.process_frames(data, 16000, offset=0, last=False)

        # Since it waits on context, no output is expected
        assert result is None

        # SECOND CHUNK
        data = np.array([[5, 6], [7, 8]])
        result = step.process_frames(data, 16000, offset=2, last=True)

        assert np.array_equal(result, np.array([
            [1, 2, 3, 4, 5, 6],
            [3, 4, 5, 6, 7, 8],
            [5, 6, 7, 8, 0, 0],
            [7, 8, 0, 0, 0, 0]
        ]))

    def test_compute_online_with_left_and_right_frames(self):
        step = pipeline.AddContext(left_frames=2, right_frames=2)

        # FIRST CHUNK
        data = np.array([[1, 2], [3, 4]])
        result = step.process_frames(data, 16000, offset=0, last=False)

        # Since it waits on context, no output is expected
        assert result is None

        # SECOND CHUNK
        data = np.array([[5, 6], [7, 8]])
        result = step.process_frames(data, 16000, offset=2, last=True)

        assert np.array_equal(result, np.array([
            [0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
            [0, 0, 1, 2, 3, 4, 5, 6, 7, 8],
            [1, 2, 3, 4, 5, 6, 7, 8, 0, 0],
            [3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
        ]))
| 31.697248
| 71
| 0.520984
| 526
| 3,455
| 3.309886
| 0.104563
| 0.03676
| 0.031017
| 0.041356
| 0.94888
| 0.922458
| 0.904078
| 0.904078
| 0.881103
| 0.848937
| 0
| 0.111632
| 0.320695
| 3,455
| 108
| 72
| 31.990741
| 0.630166
| 0.049783
| 0
| 0.712329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 1
| 0.082192
| false
| 0
| 0.027397
| 0
| 0.123288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29b079de363644052e9f050b85bccb4f6f3b431f
| 5,817
|
py
|
Python
|
quality_check.py
|
Veggente/soybean-network
|
c624a0721db13a86c455fb4f7f2721beae737104
|
[
"MIT"
] | null | null | null |
quality_check.py
|
Veggente/soybean-network
|
c624a0721db13a86c455fb4f7f2721beae737104
|
[
"MIT"
] | null | null | null |
quality_check.py
|
Veggente/soybean-network
|
c624a0721db13a86c455fb4f7f2721beae737104
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Quality check for weighted GRN reconstruction.
Functions:
show_histograms: Save or plot edge visibility histograms.
hist_w_correct: Save or plot edge visibility histograms with
ground truth.
"""
import networkx as nx
import matplotlib.pyplot as plt
plt.style.use("ggplot")
import numpy as np
def show_histograms(
    graphml_file,
    ylim=0,
    self_edge=False,
    output="",
    display=False,
    figsize=None,
    show_mean=False,
    dpi=300,
):
    """Show histograms of the edge weights.

    One stacked subplot per algorithm; each shows the distribution of
    edge weights over ALL possible edges, with never-seen edges counted
    as weight 0.

    Args:
        graphml_file: A dictionary mapping algorithm name to GraphML
            file path.
        ylim: Y-axis limit. 0 means default limit.
        self_edge: Indicator for self-edges.
        output: Output filename. Empty string disables saving.
        display: Indicates whether to display the plot.
        figsize: Figure size.
        show_mean: Indicator for showing the mean visibility
            with a black vertical line.
        dpi: DPI for png figure. Default is 300.

    Returns:
        None. Plots the histograms and/or saves them to ``output``.
    """
    num_graphs = len(graphml_file)
    fig, ax = plt.subplots(num_graphs, sharey=True, figsize=figsize)
    # plt.subplots returns a bare Axes (not an array) when only one
    # subplot is requested; normalize so ax[idx_gf] works uniformly.
    # (Hoisted out of the loop: the original did this inside the loop,
    # which only worked because num_graphs == 1 implies one iteration.)
    if num_graphs == 1:
        ax = [ax]
    bins = np.linspace(0, 1, 101)
    width = 0.01
    for idx_gf, alg in enumerate(graphml_file):
        network = nx.read_graphml(graphml_file[alg])
        num_genes = len(network.nodes())
        if self_edge:
            total_num_edges = num_genes ** 2
            weights = [data["weight"] for u, v, data in network.edges(data=True)]
        else:
            total_num_edges = num_genes * (num_genes - 1)
            weights = [
                data["weight"] for u, v, data in network.edges(data=True) if u != v
            ]
        hist, _ = np.histogram(weights, bins=bins)
        # Assign to edges that never appear the weight of 0.
        hist[0] = total_num_edges - sum(hist)
        ax[idx_gf].bar(bins[:-1] + width / 2, hist / total_num_edges, width)
        ax[idx_gf].set_title(alg)
        if ylim:
            ax[idx_gf].set_ylim(0, ylim)
        if show_mean:
            ax[idx_gf].axvline(np.mean(weights), color="k")
    plt.tight_layout()
    if output:
        fig.savefig(output, dpi=dpi)
    if display:
        fig.show()
    return
def hist_w_correct(
    graphml_file,
    ground_truth,
    ylim=0,
    self_edge=False,
    output="",
    display=False,
    figsize=None,
    show_mean=False,
    dpi=300,
):
    """Plot edge-weight histograms split by correctness against a ground truth.

    For each algorithm the full weight histogram is drawn, and the subset
    of edges whose sign matches the ground truth is overlaid in green.

    Args:
        graphml_file: A dictionary mapping algorithm names to GraphML
            file paths.
        ground_truth: Adjacency matrix file for the ground truth
            (space-delimited text readable by np.loadtxt); the sign of
            each entry encodes the regulation sign.
        ylim: Y-axis limit.  0 means default limit.
        self_edge: Indicator for self-edges.
        output: Output filename.  Empty string saves nothing.
        display: Indicates whether to display the plot.
        figsize: Figure size.
        show_mean: Indicator for showing the mean visibility with
            a black vertical line.
        dpi: DPI for png figure.  Default is 300.

    Returns:
        None.  Plots the histograms or saves as a file.
    """
    num_graphs = len(graphml_file)
    fig, ax = plt.subplots(num_graphs, sharey=True, figsize=figsize)
    bins = np.linspace(0, 1, 101)
    width = 0.01
    adj_mat = np.loadtxt(ground_truth, delimiter=" ")
    for idx_gf, alg in enumerate(graphml_file):
        network = nx.read_graphml(graphml_file[alg])
        num_genes = len(network.nodes())
        # Total number of possible directed edges, with or without
        # self-loops.
        if self_edge:
            total_num_edges = num_genes ** 2
        else:
            total_num_edges = num_genes * (num_genes - 1)
        if self_edge:
            weights = [data["weight"] for u, v, data in network.edges(data=True)]
            # Here we assume the gene ID for the ith gene is
            # 'Gene'+i, for 0 <= i <= n-1, so u[4:] recovers the index.
            # An edge is "correct" when its reconstructed sign matches
            # the sign of the corresponding ground-truth entry.
            weights_correct = [
                data["weight"]
                for u, v, data in network.edges(data=True)
                if data["sign"] == np.sign(adj_mat[int(u[4:]), int(v[4:])])
            ]
            # Reconstructed edges that do not exist in the ground truth.
            false_positive = [
                True
                for u, v, data in network.edges(data=True)
                if not adj_mat[int(u[4:]), int(v[4:])]
            ]
        else:
            weights = [
                data["weight"] for u, v, data in network.edges(data=True) if u != v
            ]
            # Here we assume the gene ID for the ith gene is
            # 'Gene'+i, for 0 <= i <= n-1.  Same correctness criterion
            # as above, excluding self-loops.
            weights_correct = [
                data["weight"]
                for u, v, data in network.edges(data=True)
                if data["sign"] == np.sign(adj_mat[int(u[4:]), int(v[4:])]) and u != v
            ]
            false_positive = [
                True
                for u, v, data in network.edges(data=True)
                if not adj_mat[int(u[4:]), int(v[4:])] and u != v
            ]
        hist, _ = np.histogram(weights, bins=bins)
        hist_correct, _ = np.histogram(weights_correct, bins=bins)
        # Assign to edges that never appear the weight of 0.
        hist[0] = total_num_edges - sum(hist)
        # Correct zero-weight edges are the true negatives: all possible
        # edges minus the ground-truth edges minus the false positives
        # that were reconstructed with a nonzero weight.
        hist_correct[0] = (
            total_num_edges - np.sum(adj_mat != 0) - np.sum(false_positive)
        )
        # plt.subplots returns a bare Axes for a single subplot; wrap it
        # so indexing below works.
        if num_graphs == 1:
            ax = [ax]
        ax[idx_gf].bar(
            bins[:-1] + width / 2, hist / total_num_edges, width, label="False"
        )
        # Overlay the correct fraction on top of the full histogram.
        ax[idx_gf].bar(
            bins[:-1] + width / 2,
            hist_correct / total_num_edges,
            width,
            color="g",
            label="True",
        )
        ax[idx_gf].legend()
        if ylim:
            ax[idx_gf].set_ylim(0, ylim)
        if show_mean:
            ax[idx_gf].axvline(np.mean(weights), color="k")
        ax[idx_gf].set_title(alg)
    plt.tight_layout()
    if output:
        fig.savefig(output, dpi=dpi)
    if display:
        fig.show()
    return
| 32.316667
| 86
| 0.559395
| 780
| 5,817
| 4.037179
| 0.192308
| 0.019054
| 0.041283
| 0.022864
| 0.824706
| 0.824706
| 0.79168
| 0.79168
| 0.749762
| 0.741823
| 0
| 0.01674
| 0.332474
| 5,817
| 179
| 87
| 32.497207
| 0.794231
| 0.254942
| 0
| 0.746032
| 0
| 0
| 0.014996
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.02381
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9bcb6569a2fdafd64f19b0c6bb4c14d064979e2
| 55,929
|
py
|
Python
|
tests/erica_legacy/request_processing/test_requests_controller.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 3
|
2022-01-31T15:17:17.000Z
|
2022-03-01T16:15:47.000Z
|
tests/erica_legacy/request_processing/test_requests_controller.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 59
|
2022-01-31T14:04:20.000Z
|
2022-03-31T20:08:47.000Z
|
tests/erica_legacy/request_processing/test_requests_controller.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 1
|
2022-03-10T09:24:28.000Z
|
2022-03-10T09:24:28.000Z
|
import base64
import unittest
from datetime import date
from unittest.mock import patch, MagicMock, call
import pytest
from erica.domain.FreischaltCode.FreischaltCode import FreischaltCodeActivatePayload, FreischaltCodeRevocatePayload
from erica.domain.tax_number_validation.check_tax_number import CheckTaxNumberPayload
from erica.erica_legacy.pyeric.check_elster_request_id import SPECIAL_TESTMERKER_IDNR
from erica.erica_legacy.pyeric.eric_errors import InvalidBufaNumberError
from erica.erica_legacy.pyeric.pyeric_response import PyericResponse
from erica.erica_legacy.request_processing.eric_mapper import EstEricMapping, UnlockCodeRequestEricMapper
from erica.erica_legacy.request_processing.erica_input.v1.erica_input import UnlockCodeRequestData, \
UnlockCodeActivationData, \
UnlockCodeRevocationData, GetAddressData
from erica.erica_legacy.request_processing.requests_controller import UnlockCodeRequestController, \
UnlockCodeActivationRequestController, EstRequestController, EstValidationRequestController, \
UnlockCodeRevocationRequestController, GetAddressRequestController, \
GetBelegeRequestController, CheckTaxNumberRequestController
from tests.erica_legacy.utils import create_est, missing_cert, missing_pyeric_lib, replace_text_in_xml, \
replace_subtree_in_xml, TEST_EST_VERANLAGUNGSJAHR
from tests.utils import read_text_from_sample
class TestEstValidationRequestProcess(unittest.TestCase):
    """Tests for EstValidationRequestController.process()."""

    def test_pyeric_controller_is_initialised_with_correct_arguments(self):
        """The pyeric controller must be constructed from the generated XML and the tax year."""
        est_validation_request = EstValidationRequestController(create_est(correct_form_data=True))
        xml = '<xml></xml>'
        # Stub out entry generation, the ERiC call, JSON generation and XML
        # generation; only the pyeric controller's __init__ args are observed.
        with patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.__init__',
                   MagicMock(return_value=None)) \
                as pyeric_controller_init, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.elster_xml.est_mapping.check_and_generate_entries'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.EstValidationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_est_xml', MagicMock(return_value=xml)):
            est_validation_request.process()
            pyeric_controller_init.assert_called_with(xml, TEST_EST_VERANLAGUNGSJAHR)

    def test_pyeric_get_eric_response_is_called(self):
        """process() must actually trigger the ERiC request via get_eric_response."""
        est_validation_request = EstValidationRequestController(create_est(correct_form_data=True))
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.__init__',
                   MagicMock(return_value=None)), \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response') \
                as pyeric_controller_get_response, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.EstValidationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_est_xml', MagicMock(return_value=xml)):
            est_validation_request.process()
            pyeric_controller_get_response.assert_called()
class TestEstRequestInit(unittest.TestCase):
    """Tests for the include_elster_responses flag on EstRequestController."""

    def test_if_no_include_param_given_then_set_include_false(self):
        """A controller built with the flag False keeps include_elster_responses False."""
        controller = EstRequestController(
            create_est(correct_form_data=True), include_elster_responses=False)
        self.assertFalse(controller.include_elster_responses)

    def test_if_include_param_true_then_set_include_true(self):
        """A controller built with the flag True keeps include_elster_responses True."""
        controller = EstRequestController(
            create_est(correct_form_data=True), include_elster_responses=True)
        self.assertTrue(controller.include_elster_responses)
class TestEstRequestProcess(unittest.TestCase):
    """Tests for EstRequestController.process()."""

    def test_check_and_generate_entries_is_called_with_eric_mapped_object(self):
        """The form data must go through the ERiC mapper before entry generation."""
        eric_mapped_object = EstEricMapping.parse_obj(create_est(correct_form_data=True).est_data)
        with patch('erica.erica_legacy.request_processing.eric_mapper', MagicMock(return_value=eric_mapped_object)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.est_mapping.check_and_generate_entries') as generate_entries, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_est_xml'):
            EstRequestController(create_est(correct_form_data=True)).process()
            assert generate_entries.mock_calls == [call(eric_mapped_object.__dict__)]

    def test_pyeric_controller_is_initialised_with_correct_arguments(self):
        """The pyeric controller must receive the generated XML and the tax year."""
        est_request = EstRequestController(create_est(correct_form_data=True))
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.__init__',
                   MagicMock(return_value=None)) \
                as pyeric_controller_init, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_est_xml', MagicMock(return_value=xml)):
            est_request.process()
            pyeric_controller_init.assert_called_with(xml, TEST_EST_VERANLAGUNGSJAHR)

    def test_pyeric_get_eric_response_is_called(self):
        """process() must actually trigger the ERiC request via get_eric_response."""
        est_request = EstRequestController(create_est(correct_form_data=True))
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.__init__',
                   MagicMock(return_value=None)), \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response') as pyeric_get_response, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.est_mapping.check_and_generate_entries'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_est_xml', MagicMock(return_value=xml)):
            est_request.process()
            pyeric_get_response.assert_called()

    def test_if_use_testmerker_env_false_and_special_idnr_then_create_xml_is_called_with_use_testmerker_set_true(self):
        """A special test id number forces the test marker in the generated XML."""
        correct_est = create_est(correct_form_data=True)
        correct_est.est_data.person_a_idnr = SPECIAL_TESTMERKER_IDNR[0]
        with patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_est_xml') as generate_xml_fun, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'):
            est_request = EstRequestController(correct_est)
            est_request.process()
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_use_testmerker_env_false_and_not_special_idnr_then_create_xml_is_called_with_use_testmerker_set_false(
            self):
        """An ordinary id number must not set the test marker."""
        correct_est = create_est(correct_form_data=True)
        correct_est.est_data.person_a_idnr = '02293417683'
        with patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_est_xml') as generate_xml_fun, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'):
            est_request = EstRequestController(correct_est)
            est_request.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_submission_without_tax_nr_then_generate_vorsatz_without_tax_nr_is_called(self):
        """Submissions without a tax number take the tax-number-less Vorsatz path."""
        empfaenger = '9198'
        correct_est = create_est(correct_form_data=True, with_tax_number=False)
        correct_est.est_data.bufa_nr = empfaenger
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.generate_vorsatz_without_tax_number') as generate_vorsatz_without_tax_number, \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_est_xml') as generate_xml_fun, \
                patch('erica.erica_legacy.pyeric.pyeric_controller.EstPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.EstRequestController.generate_json'):
            est_request = EstRequestController(correct_est)
            est_request.process()
            generate_vorsatz_without_tax_number.assert_called()
            self.assertEqual(empfaenger, generate_xml_fun.call_args.args[-1])  # empfaenger should be the last args

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_full_form_then_return_not_none_response(self):
        """End-to-end: a complete form yields a response (needs cert and ERiC lib)."""
        est_request = EstRequestController(create_est(correct_form_data=True))
        response = est_request.process()
        self.assertIsNotNone(response)

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_submission_without_tax_nr_then_return_not_none_response(self):
        """End-to-end: a form without a tax number still yields a response."""
        est_request = EstRequestController(create_est(correct_form_data=True, with_tax_number=False))
        response = est_request.process()
        self.assertIsNotNone(response)

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_full_form_and_include_elster_responses_then_return_response_only_with_correct_keys(self):
        """With the include flag, the response also carries the raw ELSTER payloads."""
        expected_keys = ['transferticket', 'pdf', 'eric_response', 'server_response']
        est_request = EstRequestController(create_est(correct_form_data=True), include_elster_responses=True)
        response = est_request.process()
        self.assertEqual(set(expected_keys), set(response.keys()))

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_full_form_and_not_include_elster_responses_then_return_response_with_correct_keys(self):
        """Without the include flag, only ticket and PDF are returned."""
        expected_keys = ['transferticket', 'pdf']
        est_request = EstRequestController(create_est(correct_form_data=True), include_elster_responses=False)
        response = est_request.process()
        self.assertEqual(expected_keys, list(response.keys()))
class TestEstRequestGenerateJson(unittest.TestCase):
    """Tests for EstRequestController.generate_json."""

    def setUp(self):
        """Prepare a canned pyeric result and the JSON fields it should produce."""
        self.expected_transferticket = 'J-KLAPAUCIUS'
        self.pdf_bytes = b"Our lives begin the day we become silent about things that matter"
        self.expected_pdf = base64.b64encode(self.pdf_bytes).decode('utf-8')
        self.expected_eric_response = "We are now faced with the fact that tomorrow is today."
        self.expected_server_response = replace_text_in_xml(
            read_text_from_sample('sample_est_response_server.xml'),
            'TransferTicket', self.expected_transferticket)

    def _generate_json(self, include_elster_responses):
        """Run generate_json on a fresh controller fed with the canned result."""
        controller = EstRequestController(
            create_est(correct_form_data=True),
            include_elster_responses=include_elster_responses)
        canned_result = PyericResponse(
            self.expected_eric_response,
            self.expected_server_response,
            self.pdf_bytes)
        return controller.generate_json(canned_result)

    def test_if_id_given_and_include_true_then_return_json_with_correct_info(self):
        """With the include flag, the raw ELSTER responses are part of the JSON."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'pdf': self.expected_pdf,
            'eric_response': self.expected_eric_response,
            'server_response': self.expected_server_response,
        }
        self.assertEqual(expected_output, self._generate_json(include_elster_responses=True))

    def test_if_id_given_and_include_false_then_return_json_with_correct_info(self):
        """Without the include flag, only ticket and PDF appear in the JSON."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'pdf': self.expected_pdf,
        }
        self.assertEqual(expected_output, self._generate_json(include_elster_responses=False))
class TestUnlockCodeRequestInit(unittest.TestCase):
    """Construction-time behaviour of UnlockCodeRequestController."""

    def test_if_idnr_given_then_set_idnr_as_attribute_correctly(self):
        """The idnr from the input data is exposed as tax_id_number."""
        expected_idnr = "09952417688"
        input_data = UnlockCodeRequestData(idnr=expected_idnr, dob=date(1969, 7, 20))
        request_controller = UnlockCodeRequestController(input_data)
        self.assertEqual(expected_idnr, request_controller.input_data.tax_id_number)

    def test_if_no_include_param_given_then_set_include_false(self):
        """Without an explicit flag, include_elster_responses defaults to False."""
        input_data = UnlockCodeRequestData(idnr="09952417688", dob=date(1969, 7, 20))
        request_controller = UnlockCodeRequestController(input_data)
        self.assertFalse(request_controller.include_elster_responses)

    def test_if_include_param_true_then_set_include_true(self):
        """Passing include_elster_responses=True is stored on the controller."""
        input_data = UnlockCodeRequestData(idnr="09952417688", dob=date(1969, 7, 20))
        request_controller = UnlockCodeRequestController(
            input_data, include_elster_responses=True)
        self.assertTrue(request_controller.include_elster_responses)
class TestUnlockCodeRequestProcess(unittest.TestCase):
    """Tests for UnlockCodeRequestController.process()."""

    def setUp(self):
        """Create controllers for an ordinary idnr and a special test-marker idnr."""
        self.unlock_request_with_valid_input = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr="02293417683",
            dob=date(1985, 1, 1)))
        self.unlock_request_with_valid_input_with_special_idnr = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr=SPECIAL_TESTMERKER_IDNR[0],
            dob=date(1985, 1, 1)))

    def test_pyeric_controller_is_initialised_with_correct_arguments(self):
        """The pyeric controller must be constructed from the generated request XML."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.__init__',
                   MagicMock(return_value=None)) \
                as pyeric_controller_init, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_vast_request_xml', MagicMock(return_value=xml)):
            self.unlock_request_with_valid_input.process()
            pyeric_controller_init.assert_called_with(xml)

    def test_pyeric_get_eric_response_is_called(self):
        """process() must actually trigger the ERiC request via get_eric_response."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.__init__',
                   MagicMock(return_value=None)), \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.get_eric_response') \
                as pyeric_controller_get_response, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_vast_request_xml', MagicMock(return_value=xml)):
            self.unlock_request_with_valid_input.process()
            pyeric_controller_get_response.assert_called()

    def test_if_special_idnr_then_create_xml_is_called_with_use_testmerker_set_true(self):
        """A special test id number forces the test marker in the generated XML."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_request_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRequestController.generate_json'):
            self.unlock_request_with_valid_input_with_special_idnr.process()
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_not_special_idnr_then_create_xml_is_called_with_use_testmerker_set_false(self):
        """An ordinary id number must not set the test marker."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_request_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRequestPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRequestController.generate_json'):
            self.unlock_request_with_valid_input.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_processed_called_then_elster_request_id_added_to_cache_list(self):
        """A processed request registers its ELSTER request id in the cache list."""
        elster_request_id = "1234"
        with patch('erica.erica_legacy.request_processing.requests_controller.TransferticketRequestController.process', MagicMock(return_value={'elster_request_id': elster_request_id})), \
                patch('erica.erica_legacy.request_processing.requests_controller.add_new_request_id_to_cache_list') as add_to_cache_list:
            self.unlock_request_with_valid_input.process()
            add_to_cache_list.assert_called_once_with(elster_request_id)
class TestUnlockCodeRequestGenerateFullXml(unittest.TestCase):
    """Tests for UnlockCodeRequestController.generate_full_xml."""

    def test_if_dob_date_given_then_call_generate_full_xml_with_unlock_code_eric_mapping(self):
        """The XML generator must receive the ERiC-mapped request as a dict."""
        dob = date(1969, 7, 20)
        expected_mapping = UnlockCodeRequestEricMapper(
            tax_id_number="09952417688", date_of_birth=dob)
        controller = UnlockCodeRequestController(
            UnlockCodeRequestData(idnr="09952417688", dob=dob))
        xml_generator_path = ('erica.erica_legacy.request_processing.requests_controller.'
                              'elster_xml_generator.generate_full_vast_request_xml')
        with patch(xml_generator_path) as generate_full_xml:
            controller.generate_full_xml(use_testmerker=True)
        self.assertEqual(
            [call(expected_mapping.__dict__, use_testmerker=True)],
            generate_full_xml.mock_calls)
class TestUnlockCodeRequestGenerateJson(unittest.TestCase):
    """Tests for UnlockCodeRequestController.generate_json."""

    def setUp(self):
        """Prepare a canned server response and the values expected in the JSON."""
        self.expected_request_id = 'J-KLAPAUCIUS'
        self.expected_transferticket = 'Transferiato'
        self.expected_idnr = "123456789"
        self.expected_eric_response = "We are now faced with the fact that tomorrow is today."
        response_with_correct_transferticket = replace_text_in_xml(
            read_text_from_sample('sample_vast_request_response.xml'),
            'TransferTicket', self.expected_transferticket)
        self.expected_server_response = response_with_correct_transferticket

    def test_if_id_given_and_include_true_then_return_json_with_correct_info(self):
        """With the include flag, the raw ELSTER responses are part of the JSON."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id,
            'idnr': self.expected_idnr,
            'eric_response': self.expected_eric_response,
            'server_response': self.expected_server_response
        }
        unlock_code_request = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr=self.expected_idnr,
            dob=date(1985, 1, 1)), include_elster_responses=True)
        # The request id is normally extracted from the XML; stub the extractor.
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
            actual_response = unlock_code_request.generate_json(pyeric_response)
            self.assertEqual(expected_output, actual_response)

    def test_if_id_given_and_include_false_then_return_json_with_correct_info(self):
        """Without the include flag, only ticket, request id and idnr are returned."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id,
            'idnr': self.expected_idnr,
        }
        unlock_code_request = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr=self.expected_idnr,
            dob=date(1985, 1, 1)), include_elster_responses=False)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
            actual_response = unlock_code_request.generate_json(pyeric_response)
            self.assertEqual(expected_output, actual_response)

    def test_if_eric_process_successful_then_return_correct_elster_request_id(self):
        """The AntragsID from the server response must surface as elster_request_id."""
        unlock_code_request = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr=self.expected_idnr,
            dob=date(1985, 1, 1)), include_elster_responses=False)
        expected_elster_request_id = "PizzaAndApplePie"
        successful_server_response = replace_text_in_xml(read_text_from_sample('sample_vast_request_response.xml', 'r'),
                                                         "AntragsID", expected_elster_request_id)
        pyeric_response = PyericResponse('eric_response', successful_server_response)
        actual_response = unlock_code_request.generate_json(pyeric_response)
        self.assertEqual(expected_elster_request_id, actual_response['elster_request_id'])

    def test_if_eric_process_successful_then_return_correct_transferticket(self):
        """The TransferTicket from the server response must surface as transferticket."""
        unlock_code_request = UnlockCodeRequestController(UnlockCodeRequestData(
            idnr=self.expected_idnr,
            dob=date(1985, 1, 1)), include_elster_responses=False)
        expected_transferticket = "PizzaAndNutCake"
        successful_server_response = replace_text_in_xml(read_text_from_sample('sample_vast_request_response.xml', 'r'),
                                                         "TransferTicket", expected_transferticket)
        pyeric_response = PyericResponse('eric_response', successful_server_response)
        actual_response = unlock_code_request.generate_json(pyeric_response)
        self.assertEqual(expected_transferticket, actual_response['transferticket'])
class TestUnlockCodeActivationProcess(unittest.TestCase):
    """Tests for UnlockCodeActivationRequestController.process()."""

    def setUp(self):
        """Create controllers for known, special, unknown and idnr-less inputs."""
        self.known_idnr = '02293417683'
        self.unlock_activation_with_valid_input = UnlockCodeActivationRequestController(UnlockCodeActivationData(
            idnr=self.known_idnr,
            unlock_code='1985-T67D-K89O',
            elster_request_id='42'))
        self.unlock_activation_with_valid_input_with_special_idnr = UnlockCodeActivationRequestController(
            UnlockCodeActivationData(
                idnr=SPECIAL_TESTMERKER_IDNR[0],
                unlock_code='1985-T67D-K89O',
                elster_request_id='42'))
        self.unlock_activation_with_unknown_idnr = UnlockCodeActivationRequestController(UnlockCodeActivationData(
            idnr="123456789",
            unlock_code='1985-T67D-K89O',
            elster_request_id='42'))
        # Payload variant without an idnr; the testmerker decision then
        # falls back to request_needs_testmerker (see the last two tests).
        self.unlock_activation_without_idnr = UnlockCodeActivationRequestController(FreischaltCodeActivatePayload(
            freischalt_code='1985-T67D-K89O',
            elster_request_id='42'))

    def test_pyeric_controller_is_initialised_with_correct_argument(self):
        """The pyeric controller must be constructed from the generated activation XML."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.__init__',
                   MagicMock(return_value=None)) \
                as pyeric_controller_init, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.'
                      'generate_full_vast_activation_xml', MagicMock(return_value=xml)):
            self.unlock_activation_with_valid_input.process()
            pyeric_controller_init.assert_called_with(xml)

    def test_pyeric_get_eric_response_is_called(self):
        """process() must actually trigger the ERiC request via get_eric_response."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.__init__',
                   MagicMock(return_value=None)), \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response') \
                as pyeric_controller_get_response, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_activation_xml',
                      MagicMock(return_value=xml)):
            self.unlock_activation_with_valid_input.process()
            pyeric_controller_get_response.assert_called()

    def test_if_special_idnr_then_create_xml_is_called_with_use_testmerker_set_true(self):
        """A special test id number forces the test marker in the generated XML."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_activation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'):
            self.unlock_activation_with_valid_input_with_special_idnr.process()
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_not_special_idnr_then_create_xml_is_called_with_use_testmerker_set_false(self):
        """An ordinary id number must not set the test marker."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_activation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'):
            self.unlock_activation_with_valid_input.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_idnr_and_request_needs_test_merker_then_create_xml_is_called_with_true(self):
        """Without an idnr, request_needs_testmerker=True propagates into the XML call."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_activation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.request_needs_testmerker', MagicMock(return_value=True)):
            self.unlock_activation_without_idnr.process()
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_idnr_and_request_needs_test_merker_then_create_xml_is_called_with_false(self):
        """Without an idnr, request_needs_testmerker=False propagates into the XML call."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_activation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeActivationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeActivationRequestController.generate_json'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.request_needs_testmerker', MagicMock(return_value=False)):
            self.unlock_activation_without_idnr.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])
class TestUnlockCodeActivationGenerateJson(unittest.TestCase):
    """Tests for UnlockCodeActivationRequestController.generate_json."""

    def setUp(self):
        """Prepare a canned activation response and the values expected in the JSON."""
        self.expected_idnr = "123456789"
        self.expected_request_id = 'J-KLAPAUCIUS'
        self.expected_transferticket = 'Transfiguration'
        self.expected_eric_response = "We are now faced with the fact that tomorrow is today."
        response_with_correct_transferticket = replace_text_in_xml(
            read_text_from_sample('sample_vast_activation_response.xml'),
            'TransferTicket', self.expected_transferticket)
        self.expected_server_response = response_with_correct_transferticket

    def test_if_id_given_and_include_true_then_return_json_with_correct_info(self):
        """With the include flag, the raw ELSTER responses are part of the JSON."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id,
            'idnr': self.expected_idnr,
            'eric_response': self.expected_eric_response,
            'server_response': self.expected_server_response
        }
        unlock_code_request = UnlockCodeActivationRequestController(UnlockCodeActivationData(
            idnr=self.expected_idnr,
            unlock_code='1985-T67D-K89O',
            elster_request_id='42'), include_elster_responses=True)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        # The request id is normally extracted from the XML; stub the extractor.
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            actual_response = unlock_code_request.generate_json(pyeric_response)
            self.assertEqual(expected_output, actual_response)

    def test_if_id_given_and_include_false_then_return_json_with_correct_info(self):
        """Without the include flag, only ticket, request id and idnr are returned."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id,
            'idnr': self.expected_idnr,
        }
        unlock_code_request = UnlockCodeActivationRequestController(UnlockCodeActivationData(
            idnr=self.expected_idnr,
            unlock_code='1985-T67D-K89O',
            elster_request_id='42'), include_elster_responses=False)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            actual_response = unlock_code_request.generate_json(pyeric_response)
            self.assertEqual(expected_output, actual_response)

    def test_if_eric_process_successful_then_return_correct_transferticket(self):
        """The TransferTicket from the server response must surface as transferticket."""
        expected_transferticket = "PizzaAndNutCake"
        unlock_code_activation = UnlockCodeActivationRequestController(UnlockCodeActivationData(
            idnr=self.expected_idnr,
            unlock_code='1985-T67D-K89O',
            elster_request_id='42'), include_elster_responses=False)
        successful_server_response = replace_text_in_xml(read_text_from_sample('sample_vast_activation_response.xml'),
                                                         "TransferTicket", expected_transferticket)
        pyeric_response = PyericResponse('eric_response', successful_server_response)
        actual_response = unlock_code_activation.generate_json(pyeric_response)
        self.assertEqual(expected_transferticket, actual_response['transferticket'])
class TestUnlockCodeRevocationProcess(unittest.TestCase):
    """Tests for UnlockCodeRevocationRequestController.process().

    All ELSTER/pyeric collaborators are patched out; the tests only verify
    which collaborators process() calls and with which arguments.
    """

    def setUp(self):
        self.known_idnr = '02293417683'
        self.unlock_revocation_with_valid_input = UnlockCodeRevocationRequestController(UnlockCodeRevocationData(
            idnr=self.known_idnr,
            elster_request_id='lookanotherrequestid'))
        self.unlock_revocation_with_valid_input_and_special_idnr = UnlockCodeRevocationRequestController(
            UnlockCodeRevocationData(
                idnr=SPECIAL_TESTMERKER_IDNR[0],
                elster_request_id='lookanotherrequestid'))
        self.unlock_revocation_with_unknown_idnr = UnlockCodeRevocationRequestController(UnlockCodeRevocationData(
            idnr="123456789",
            elster_request_id='lookyetanotherrequestid'))
        # NOTE(review): built from FreischaltCodeRevocatePayload (no idnr)
        # while the controllers above use UnlockCodeRevocationData —
        # presumably intentional for the request_needs_testmerker tests;
        # confirm against the controller's expected payload type.
        self.unlock_revocation_without_idnr = UnlockCodeRevocationRequestController(FreischaltCodeRevocatePayload(
            elster_request_id='lookyetanotherrequestid'))

    def test_pyeric_controller_is_initialised_with_correct_arguments(self):
        """The pyeric controller must be constructed with the generated XML."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.__init__',
                   MagicMock(return_value=None)) \
                as pyeric_controller_init, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml',
                      MagicMock(return_value=xml)):
            self.unlock_revocation_with_valid_input.process()
            pyeric_controller_init.assert_called_with(xml)

    def test_pyeric_get_eric_response_is_called(self):
        """process() must trigger the pyeric request via get_eric_response()."""
        xml = '<xml></xml>'
        with patch('erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.__init__',
                   MagicMock(return_value=None)), \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response') \
                as pyeric_controller_get_response, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'), \
                patch('erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml',
                      MagicMock(return_value=xml)):
            self.unlock_revocation_with_valid_input.process()
            pyeric_controller_get_response.assert_called()

    def test_if_special_idnr_then_create_xml_is_called_with_use_testmerker_set_true(self):
        """A special test idnr must set use_testmerker=True on XML generation."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'):
            self.unlock_revocation_with_valid_input_and_special_idnr.process()
            # call_args.kwargs requires Python >= 3.8
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_not_special_idnr_then_create_xml_is_called_with_use_testmerker_set_false(self):
        """An ordinary idnr must set use_testmerker=False on XML generation."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'):
            self.unlock_revocation_with_valid_input.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_idnr_and_request_needs_test_merker_then_create_xml_is_called_with_true(self):
        """If request_needs_testmerker says True, use_testmerker must be True."""
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.request_needs_testmerker', MagicMock(return_value=True)):
            self.unlock_revocation_without_idnr.process()
            self.assertTrue(generate_xml_fun.call_args.kwargs['use_testmerker'])

    def test_if_idnr_and_request_needs_test_merker_then_create_xml_is_called_with_false(self):
        """If request_needs_testmerker says False, use_testmerker must be False."""
        # NOTE(review): name reads the same as the test above up to the
        # trailing "_with_false"; consider renaming to
        # ..._does_not_need_test_merker... for clarity.
        with patch(
                'erica.erica_legacy.elster_xml.elster_xml_generator.generate_full_vast_revocation_xml') as generate_xml_fun, \
                patch(
                    'erica.erica_legacy.pyeric.pyeric_controller.UnlockCodeRevocationPyericProcessController.get_eric_response'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.UnlockCodeRevocationRequestController.generate_json'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.request_needs_testmerker', MagicMock(return_value=False)):
            self.unlock_revocation_without_idnr.process()
            self.assertFalse(generate_xml_fun.call_args.kwargs['use_testmerker'])
class TestUnlockCodeRevocationGenerateJson(unittest.TestCase):
    """Tests for UnlockCodeRevocationRequestController.generate_json()."""

    def setUp(self):
        self.expected_idnr = "123456789"
        self.expected_request_id = 'J-KLAPAUCIUS'
        self.expected_transferticket = 'The time is always right to do what is right.'
        self.expected_eric_response = "We are now faced with the fact that tomorrow is today."
        # Sample revocation response with the TransferTicket element replaced
        # by a known value, so the expected JSON can be asserted exactly.
        response_with_correct_transferticket = replace_text_in_xml(
            read_text_from_sample('sample_vast_revocation_response.xml'),
            'TransferTicket', self.expected_transferticket)
        self.expected_server_response = response_with_correct_transferticket

    def test_if_id_given_and_include_true_then_return_json_with_correct_info(self):
        """With include_elster_responses the raw eric/server responses are included."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id,
            'eric_response': self.expected_eric_response,
            'server_response': self.expected_server_response
        }
        unlock_code_request = UnlockCodeRevocationRequestController(UnlockCodeRevocationData(
            idnr=self.expected_idnr,
            elster_request_id='lookanotherrequestid'), include_elster_responses=True)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            actual_response = unlock_code_request.generate_json(pyeric_response)
        self.assertEqual(expected_output, actual_response)

    def test_if_id_given_and_include_false_then_return_json_with_correct_info(self):
        """Without include_elster_responses only ticket and request id are returned."""
        expected_output = {
            'transferticket': self.expected_transferticket,
            'elster_request_id': self.expected_request_id
        }
        unlock_code_request = UnlockCodeRevocationRequestController(UnlockCodeRevocationData(
            idnr=self.expected_idnr,
            elster_request_id='lookanotherrequestid'), include_elster_responses=False)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_antrag_id_from_xml',
                   MagicMock(return_value=self.expected_request_id)):
            actual_response = unlock_code_request.generate_json(pyeric_response)
        self.assertEqual(expected_output, actual_response)

    def test_if_eric_process_successful_then_return_correct_transferticket(self):
        """The transferticket must come from the server response XML."""
        expected_transferticket = "PizzaAndNutCake"
        unlock_code_revocation = UnlockCodeRevocationRequestController(UnlockCodeRevocationData(idnr=self.expected_idnr,
                                                                                                elster_request_id='42'),
                                                                       include_elster_responses=False)
        successful_server_response = replace_text_in_xml(read_text_from_sample('sample_vast_revocation_response.xml'),
                                                         "TransferTicket", expected_transferticket)
        pyeric_response = PyericResponse('eric_response', successful_server_response)
        actual_response = unlock_code_revocation.generate_json(pyeric_response)
        self.assertEqual(expected_transferticket, actual_response['transferticket'])
class TestCheckTaxNumberRequestControllerProcess:
    """Integration tests for CheckTaxNumberRequestController.process().

    These exercise the real ERiC library and are therefore skipped when the
    certificate (cert.pfx) or the pyeric library is not available.
    """

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_tax_number_is_valid_then_return_json_with_is_valid_true(self):
        """A syntactically valid tax number yields {'is_valid': True}."""
        state_abbreviation = "by"
        valid_tax_number = "19811310010"
        input_data = CheckTaxNumberPayload(state_abbreviation=state_abbreviation, tax_number=valid_tax_number)
        result = CheckTaxNumberRequestController(input_data).process()
        assert result == {'is_valid': True}

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_tax_number_is_invalid_then_return_json_with_is_valid_false(self):
        """An invalid tax number yields {'is_valid': False}."""
        state_abbreviation = "by"
        invalid_tax_number = "19811310011"  # is invalid because of incorrect check sum (last digit should be 0)
        input_data = CheckTaxNumberPayload(state_abbreviation=state_abbreviation, tax_number=invalid_tax_number)
        result = CheckTaxNumberRequestController(input_data).process()
        assert result == {'is_valid': False}

    @pytest.mark.skipif(missing_cert(), reason="skipped because of missing cert.pfx; see pyeric/README.md")
    @pytest.mark.skipif(missing_pyeric_lib(), reason="skipped because of missing eric lib; see pyeric/README.md")
    def test_if_generate_electronic_steuernummer_raises_invalid_bufa_nr_then_return_json_with_is_valid_false(self):
        """An InvalidBufaNumberError during number generation maps to is_valid False."""
        state_abbreviation = "by"
        valid_tax_number = "19811310010"
        input_data = CheckTaxNumberPayload(state_abbreviation=state_abbreviation, tax_number=valid_tax_number)
        with patch('erica.erica_legacy.request_processing.requests_controller.generate_electronic_steuernummer', MagicMock(side_effect=InvalidBufaNumberError)):
            result = CheckTaxNumberRequestController(input_data).process()
        assert result == {'is_valid': False}
class TestGetBelegeRequestController(unittest.TestCase):
    """Tests for the private request steps of GetBelegeRequestController:
    _request_beleg_ids, _request_encrypted_belege and _request_decrypted_belege.
    """

    def setUp(self):
        self.idnr = '04452397687'
        self.input_data = GetAddressData.parse_obj({'idnr': self.idnr})
        self.request_xml = '<Anfrage>'
        self.sample_beleg_ids = ['vg3071ovc201t97gdvyy1851qrutaheh']
        self.sample_encrypted_belege = [read_text_from_sample('sample_encrypted_beleg.xml')]

    def test_get_beleg_ids_calls_correct_pyeric_controller_with_correct_argument(self):
        """_request_beleg_ids must construct BelegIdRequestPyericProcessController with the generated XML."""
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.BelegIdRequestPyericProcessController.__init__',
                MagicMock(return_value=None)) as pyeric_controller_mock, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_ids_request_xml',
                    MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegIdRequestPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.get_relevant_beleg_ids'):
            GetBelegeRequestController(self.input_data)._request_beleg_ids()
            pyeric_controller_mock.assert_called_once_with(self.request_xml)

    def test_get_beleg_ids_calls_get_eric_response(self):
        """_request_beleg_ids must trigger the pyeric request."""
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_ids_request_xml',
                MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegIdRequestPyericProcessController.get_eric_response') as fun_get_eric_response, \
                patch('erica.erica_legacy.request_processing.requests_controller.get_relevant_beleg_ids'):
            GetBelegeRequestController(self.input_data)._request_beleg_ids()
            fun_get_eric_response.assert_called_once()

    def test_get_beleg_ids_returns_relevant_beleg_ids_from_pyeric_response_for_given_beleg_art(self):
        """Only beleg ids matching _NEEDED_BELEG_ART are returned from the response."""
        mocked_pyeric_response = PyericResponse('', read_text_from_sample('sample_beleg_id_response.xml'))
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_ids_request_xml',
                MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegIdRequestPyericProcessController.get_eric_response',
                    MagicMock(return_value=mocked_pyeric_response)):
            request_controller = GetBelegeRequestController(self.input_data)
            request_controller._NEEDED_BELEG_ART = 'VaSt_Pers1'
            returned_beleg_ids = request_controller._request_beleg_ids()
            self.assertEqual(['vg3071ovc201t97gdvyy1851qrutaheh'], returned_beleg_ids)

    def test_request_encrypted_belege_calls_correct_pyeric_controller_with_correct_argument(self):
        """_request_encrypted_belege must construct BelegRequestPyericProcessController with the generated XML."""
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.BelegRequestPyericProcessController.__init__',
                MagicMock(return_value=None)) as pyeric_controller_mock, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_request_xml',
                    MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegRequestPyericProcessController.get_eric_response'), \
                patch('erica.erica_legacy.request_processing.requests_controller.get_elements_text_from_xml'):
            GetBelegeRequestController(self.input_data)._request_encrypted_belege(self.sample_beleg_ids)
            pyeric_controller_mock.assert_called_once_with(self.request_xml)

    def test_request_encrypted_belege_calls_get_eric_response(self):
        """_request_encrypted_belege must trigger the pyeric request."""
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_request_xml',
                MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegRequestPyericProcessController.get_eric_response') as fun_get_eric_response, \
                patch('erica.erica_legacy.request_processing.requests_controller.get_elements_text_from_xml'):
            GetBelegeRequestController(self.input_data)._request_encrypted_belege(self.sample_beleg_ids)
            fun_get_eric_response.assert_called_once()

    def test_request_encrypted_belege_returns_relevant_beleg_ids_from_pyeric_response(self):
        """The Datenpaket payloads from the response are returned as encrypted belege."""
        sample_encrypted_beleg = 'SpeakFriendAndEnter'
        sample_response_with_encrypted_beleg = replace_text_in_xml(
            read_text_from_sample('sample_encrypted_beleg_response.xml'), 'Datenpaket',
            sample_encrypted_beleg)
        mocked_pyeric_response = PyericResponse('', sample_response_with_encrypted_beleg)
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_request_xml',
                MagicMock(return_value=self.request_xml)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegRequestPyericProcessController.get_eric_response',
                    MagicMock(return_value=mocked_pyeric_response)):
            request_controller = GetBelegeRequestController(self.input_data)
            returned_encrypted_belege = request_controller._request_encrypted_belege(self.sample_beleg_ids)
            self.assertEqual([sample_encrypted_beleg], returned_encrypted_belege)

    def test_request_decrypted_belege_calls_get_decrypted_belege_of_correct_pyeric_controller(self):
        """_request_decrypted_belege must delegate to DecryptBelegePyericController."""
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.DecryptBelegePyericController.get_decrypted_belege') as fun_get_eric_response, \
                patch('erica.erica_legacy.request_processing.requests_controller.get_belege_xml'):
            GetBelegeRequestController(self.input_data)._request_decrypted_belege(self.sample_encrypted_belege)
            fun_get_eric_response.assert_called_once_with(self.sample_encrypted_belege)

    def test_request_decrypted_belege_returns_decrypted_belege(self):
        """The decrypted beleg XML is wrapped into a combined <Belege...> document."""
        sample_beleg_xml = read_text_from_sample('sample_decrypted_beleg_response.xml')
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.DecryptBelegePyericController.get_decrypted_belege',
                MagicMock(return_value=[sample_beleg_xml])):
            request_controller = GetBelegeRequestController(self.input_data)
            returned_decrypted_belege_xml = request_controller._request_decrypted_belege(
                self.sample_encrypted_belege)
            # Skip the XML declaration / namespace preamble of the sample when
            # checking that its content made it into the combined document.
            len_of_namespace_intro = len(
                '<?xml version="1.0" encoding="ISO-8859-15" ?><VaSt_RBM xmlns="http://finkonsens.de/elster/elstervast/vastrbm/v202001" version="202001">')
            self.assertIn(sample_beleg_xml[len_of_namespace_intro], returned_decrypted_belege_xml)
            self.assertIn('<Belege', returned_decrypted_belege_xml)
class TestGetAddressProcess(unittest.TestCase):
    """Tests for GetAddressRequestController.process()."""

    def setUp(self):
        self.known_idnr = '02293417683'
        self.get_address_with_valid_input = GetAddressRequestController(GetAddressData(idnr=self.known_idnr))

    def test_calls_get_relevant_beleg_ids_with_correct_arguments(self):
        """process() must look up beleg ids for the 'VaSt_Pers1' beleg art."""
        mock_server_response = 'server_response'
        mock_pyeric_response = PyericResponse('', mock_server_response)
        with patch(
                'erica.erica_legacy.request_processing.requests_controller.elster_xml_generator.generate_full_vast_beleg_ids_request_xml'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.BelegIdRequestPyericProcessController.get_eric_response',
                    MagicMock(return_value=mock_pyeric_response)), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.get_relevant_beleg_ids') as fun_get_beleg_ids, \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.GetBelegeRequestController._request_encrypted_belege'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.GetBelegeRequestController._request_decrypted_belege'), \
                patch(
                    'erica.erica_legacy.request_processing.requests_controller.GetAddressRequestController.generate_json'):
            self.get_address_with_valid_input.process()
            fun_get_beleg_ids.assert_called_once_with(mock_server_response, ['VaSt_Pers1'])
class TestGetAddressGenerateJson(unittest.TestCase):
    """Tests for GetAddressRequestController.generate_json()."""

    def setUp(self):
        self.expected_idnr = "123456789"
        self.expected_address = '<Str>Musterstraße</Str>'
        self.expected_eric_response = "We are now faced with the fact that tomorrow is today."
        # Sample address response with the AdrKette subtree replaced by a
        # known address so the expected JSON can be asserted exactly.
        response_with_correct_address = replace_subtree_in_xml(read_text_from_sample('sample_beleg_address_response.xml'),
                                                               'AdrKette', self.expected_address)
        self.expected_server_response = response_with_correct_address

    def test_if_id_given_and_include_true_then_return_json_with_correct_info(self):
        """With include_elster_responses the raw eric/server responses are included."""
        expected_output = {
            'address': self.expected_address,
            'eric_response': self.expected_eric_response,
            'server_response': self.expected_server_response
        }
        get_address_request = GetAddressRequestController(GetAddressData(idnr=self.expected_idnr),
                                                          include_elster_responses=True)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_address_from_xml',
                   MagicMock(return_value=self.expected_address)):
            actual_response = get_address_request.generate_json(pyeric_response)
        self.assertEqual(expected_output, actual_response)

    def test_if_id_given_and_include_false_then_return_json_with_correct_info(self):
        """Without include_elster_responses only the address is returned."""
        expected_output = {
            'address': self.expected_address
        }
        unlock_code_request = GetAddressRequestController(GetAddressData(idnr=self.expected_idnr),
                                                          include_elster_responses=False)
        pyeric_response = PyericResponse(self.expected_eric_response, self.expected_server_response)
        with patch('erica.erica_legacy.request_processing.requests_controller.get_address_from_xml',
                   MagicMock(return_value=self.expected_address)):
            actual_response = unlock_code_request.generate_json(pyeric_response)
        self.assertEqual(expected_output, actual_response)
| 57.540123
| 187
| 0.731499
| 5,921
| 55,929
| 6.440128
| 0.053032
| 0.039521
| 0.057065
| 0.071593
| 0.856708
| 0.840554
| 0.822983
| 0.797965
| 0.786479
| 0.771426
| 0
| 0.009365
| 0.196177
| 55,929
| 971
| 188
| 57.599382
| 0.838824
| 0.001806
| 0
| 0.704575
| 0
| 0.001307
| 0.281487
| 0.228983
| 0
| 0
| 0
| 0
| 0.082353
| 1
| 0.09281
| false
| 0
| 0.019608
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9f7fd2d10ca3043b2a20bc76cc78b55d192b3ea
| 43,005
|
py
|
Python
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/api/contacts___types_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/api/contacts___types_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/api/contacts___types_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
IRN API v1
Allows users to extract, create, update and configure IRN data. # noqa: E501
The version of the OpenAPI document: 1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.IRNConfiguration.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.IRNConfiguration.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.IRNConfiguration.exceptions import ApiException
from fds.sdk.IRNConfiguration.model.contact_type_dto import ContactTypeDto
from fds.sdk.IRNConfiguration.model.new_item_dto import NewItemDto
from fds.sdk.IRNConfiguration.model.problem_details import ProblemDetails
class ContactsTypesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Build the four contact-type endpoints.

    Args:
        api_client (ApiClient, optional): client used for all requests;
            a default ApiClient() is created when None.
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
    # POST /v1/contact-types — create a contact type from a plain string body.
    self.create_contact_type_endpoint = _Endpoint(
        settings={
            'response_type': (
                { 201: (NewItemDto,), 400: (ProblemDetails,), 0: (ProblemDetails,), },
                None
            ),
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/v1/contact-types',
            'operation_id': 'create_contact_type',
            'http_method': 'POST',
            'servers': None,
        },
        params_map={
            'all': [
                'body',
            ],
            'required': [
                'body',
            ],
            'nullable': [
            ],
            'enum': [
            ],
            'validation': [
                'body',
            ]
        },
        root_map={
            'validations': {
                # Contact type name is limited to 50 characters.
                ('body',): {
                    'max_length': 50,
                    'min_length': 0,
                },
            },
            'allowed_values': {
            },
            'openapi_types': {
                'body':
                    (str,),
            },
            'attribute_map': {
            },
            'location_map': {
                'body': 'body',
            },
            'collection_format_map': {
            }
        },
        headers_map={
            'accept': [
                'application/json'
            ],
            'content_type': [
                'application/json-patch+json',
                'application/json',
                'text/json',
                'application/*+json'
            ]
        },
        api_client=api_client
    )
    # DELETE /v1/contact-types/{contactTypeId} — no response body expected.
    self.delete_contact_type_endpoint = _Endpoint(
        settings={
            'response_type': None,
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/v1/contact-types/{contactTypeId}',
            'operation_id': 'delete_contact_type',
            'http_method': 'DELETE',
            'servers': None,
        },
        params_map={
            'all': [
                'contact_type_id',
            ],
            'required': [
                'contact_type_id',
            ],
            'nullable': [
            ],
            'enum': [
            ],
            'validation': [
            ]
        },
        root_map={
            'validations': {
            },
            'allowed_values': {
            },
            'openapi_types': {
                'contact_type_id':
                    (str,),
            },
            'attribute_map': {
                'contact_type_id': 'contactTypeId',
            },
            'location_map': {
                'contact_type_id': 'path',
            },
            'collection_format_map': {
            }
        },
        headers_map={
            'accept': [
                'application/json'
            ],
            'content_type': [],
        },
        api_client=api_client
    )
    # GET /v1/contact-types — list all contact types; takes no parameters.
    self.get_contact_types_endpoint = _Endpoint(
        settings={
            'response_type': (
                { 200: ([ContactTypeDto],), },
                None
            ),
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/v1/contact-types',
            'operation_id': 'get_contact_types',
            'http_method': 'GET',
            'servers': None,
        },
        params_map={
            'all': [
            ],
            'required': [],
            'nullable': [
            ],
            'enum': [
            ],
            'validation': [
            ]
        },
        root_map={
            'validations': {
            },
            'allowed_values': {
            },
            'openapi_types': {
            },
            'attribute_map': {
            },
            'location_map': {
            },
            'collection_format_map': {
            }
        },
        headers_map={
            'accept': [
                'application/json'
            ],
            'content_type': [],
        },
        api_client=api_client
    )
    # PUT /v1/contact-types/{contactTypeId} — rename a contact type.
    self.update_contact_type_endpoint = _Endpoint(
        settings={
            'response_type': None,
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/v1/contact-types/{contactTypeId}',
            'operation_id': 'update_contact_type',
            'http_method': 'PUT',
            'servers': None,
        },
        params_map={
            'all': [
                'contact_type_id',
                'body',
            ],
            'required': [
                'contact_type_id',
                'body',
            ],
            'nullable': [
            ],
            'enum': [
            ],
            'validation': [
                'body',
            ]
        },
        root_map={
            'validations': {
                # Same 50-character limit as create_contact_type.
                ('body',): {
                    'max_length': 50,
                    'min_length': 0,
                },
            },
            'allowed_values': {
            },
            'openapi_types': {
                'contact_type_id':
                    (str,),
                'body':
                    (str,),
            },
            'attribute_map': {
                'contact_type_id': 'contactTypeId',
            },
            'location_map': {
                'contact_type_id': 'path',
                'body': 'body',
            },
            'collection_format_map': {
            }
        },
        headers_map={
            'accept': [
                'application/json'
            ],
            'content_type': [
                'application/json-patch+json',
                'application/json',
                'text/json',
                'application/*+json'
            ]
        },
        api_client=api_client
    )
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def create_contact_type(
    self,
    body,
    **kwargs
) -> NewItemDto:
    """Create contact types # noqa: E501

    Synchronous request; returns only the deserialized response body.

    Args:
        body (str): Contact type name

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding it. Default is True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _spec_property_naming (bool): True if input keys use the serialized
            (OpenAPI) names; False for pythonic snake_case names (default).
        _content_type (str/None): force the request body content-type;
            by default it is predicted from the allowed content-types and body.
        _host_index (int/None): index of the server to use; read from the
            configuration by default.

    Returns:
        NewItemDto
            Response Object
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['body'] = body
    return self.create_contact_type_endpoint.call_with_http_info(**kwargs)
def create_contact_type_with_http_info(
    self,
    body,
    **kwargs
) -> typing.Tuple[NewItemDto, int, typing.MutableMapping]:
    """Create contact types # noqa: E501

    Synchronous request; returns the response body together with the HTTP
    status code and the response headers.

    Args:
        body (str): Contact type name

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding it. Default is True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _spec_property_naming (bool): True if input keys use the serialized
            (OpenAPI) names; False for pythonic snake_case names (default).
        _content_type (str/None): force the request body content-type;
            by default it is predicted from the allowed content-types and body.
        _host_index (int/None): index of the server to use; read from the
            configuration by default.

    Returns:
        NewItemDto
            Response Object
        int
            Http Status Code
        dict
            Dictionary of the response headers
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['body'] = body
    return self.create_contact_type_endpoint.call_with_http_info(**kwargs)
def create_contact_type_async(
    self,
    body,
    **kwargs
) -> "ApplyResult[NewItemDto]":
    """Create contact types # noqa: E501

    Asynchronous request; returns the response body wrapped in an ApplyResult.

    Args:
        body (str): Contact type name

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding it. Default is True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _spec_property_naming (bool): True if input keys use the serialized
            (OpenAPI) names; False for pythonic snake_case names (default).
        _content_type (str/None): force the request body content-type;
            by default it is predicted from the allowed content-types and body.
        _host_index (int/None): index of the server to use; read from the
            configuration by default.

    Returns:
        ApplyResult[NewItemDto]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['body'] = body
    return self.create_contact_type_endpoint.call_with_http_info(**kwargs)
def create_contact_type_with_http_info_async(
    self,
    body,
    **kwargs
) -> "ApplyResult[typing.Tuple[NewItemDto, int, typing.MutableMapping]]":
    """Create contact types # noqa: E501

    Asynchronous request; returns body, HTTP status and headers wrapped in
    an ApplyResult.

    Args:
        body (str): Contact type name

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding it. Default is True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _spec_property_naming (bool): True if input keys use the serialized
            (OpenAPI) names; False for pythonic snake_case names (default).
        _content_type (str/None): force the request body content-type;
            by default it is predicted from the allowed content-types and body.
        _host_index (int/None): index of the server to use; read from the
            configuration by default.

    Returns:
        ApplyResult[(NewItemDto, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['body'] = body
    return self.create_contact_type_endpoint.call_with_http_info(**kwargs)
def delete_contact_type(self, contact_type_id, **kwargs) -> None:
    """Delete a contact type # noqa: E501

    Synchronous variant: only the http data is returned.

    Args:
        contact_type_id (str): contactTypeId of the record to delete.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        None
    """
    # Blocking call that discards status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['contact_type_id'] = contact_type_id
    return self.delete_contact_type_endpoint.call_with_http_info(**kwargs)
def delete_contact_type_with_http_info(self, contact_type_id, **kwargs) -> typing.Tuple[None, int, typing.MutableMapping]:
    """Delete a contact type # noqa: E501

    Synchronous variant: returns (http data, http status code, headers).

    Args:
        contact_type_id (str): contactTypeId of the record to delete.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        tuple: (None, http status code, dict of response headers)
    """
    # Blocking call that keeps the full (data, status, headers) triple.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['contact_type_id'] = contact_type_id
    return self.delete_contact_type_endpoint.call_with_http_info(**kwargs)
def delete_contact_type_async(self, contact_type_id, **kwargs) -> "ApplyResult[None]":
    """Delete a contact type # noqa: E501

    Asynchronous variant: only the http data is returned, wrapped in an
    ApplyResult.

    Args:
        contact_type_id (str): contactTypeId of the record to delete.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[None]
    """
    # Async call that discards status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['contact_type_id'] = contact_type_id
    return self.delete_contact_type_endpoint.call_with_http_info(**kwargs)
def delete_contact_type_with_http_info_async(self, contact_type_id, **kwargs) -> "ApplyResult[typing.Tuple[None, int, typing.MutableMapping]]":
    """Delete a contact type # noqa: E501

    Asynchronous variant: the http data, the http status code and the
    response headers are returned together, wrapped in an ApplyResult.

    Args:
        contact_type_id (str): contactTypeId of the record to delete.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[(None, int, typing.Dict)]
    """
    # Async call keeping the full (data, status, headers) triple.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['contact_type_id'] = contact_type_id
    return self.delete_contact_type_endpoint.call_with_http_info(**kwargs)
def get_contact_types(
    self,
    **kwargs
) -> typing.List[ContactTypeDto]:
    """Get list of the contact types configured in your group # noqa: E501

    Synchronous variant: only the http data is returned.

    Keyword Args:
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input names are the
            serialized (OpenAPI) names; False for pythonic snake_case
            names (default).
        _content_type (str/None): force body content-type. Default None
            (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        typing.List[ContactTypeDto]: the configured contact types.
    """
    # Fix: the original return annotation `[ContactTypeDto]` was a plain
    # list literal, not a type hint; typing.List is the valid spelling
    # and is understood by type checkers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    return self.get_contact_types_endpoint.call_with_http_info(**kwargs)
def get_contact_types_with_http_info(
    self,
    **kwargs
) -> typing.Tuple[typing.List[ContactTypeDto], int, typing.MutableMapping]:
    """Get list of the contact types configured in your group # noqa: E501

    Synchronous variant: returns (http data, http status code, headers).

    Keyword Args:
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input names are the
            serialized (OpenAPI) names; False for pythonic snake_case
            names (default).
        _content_type (str/None): force body content-type. Default None
            (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        tuple: (list of ContactTypeDto, http status code, dict of
        response headers)
    """
    # Fix: the original annotation passed the list literal
    # `[ContactTypeDto]` as a type argument to typing.Tuple; typing
    # rejects list literals outside Callable (TypeError when the
    # annotation is evaluated). typing.List[...] is the valid form.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    return self.get_contact_types_endpoint.call_with_http_info(**kwargs)
def get_contact_types_async(self, **kwargs) -> "ApplyResult[[ContactTypeDto]]":
    """Get list of the contact types configured in your group # noqa: E501

    Asynchronous variant: only the http data is returned, wrapped in an
    ApplyResult.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[[ContactTypeDto]]
    """
    # Async call that discards status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    return self.get_contact_types_endpoint.call_with_http_info(**kwargs)
def get_contact_types_with_http_info_async(self, **kwargs) -> "ApplyResult[typing.Tuple[[ContactTypeDto], int, typing.MutableMapping]]":
    """Get list of the contact types configured in your group # noqa: E501

    Asynchronous variant: the http data, the http status code and the
    response headers are returned together, wrapped in an ApplyResult.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[([ContactTypeDto], int, typing.Dict)]
    """
    # Async call keeping the full (data, status, headers) triple.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    return self.get_contact_types_endpoint.call_with_http_info(**kwargs)
def update_contact_type(self, contact_type_id, body, **kwargs) -> None:
    """Edit a contact type # noqa: E501

    Synchronous variant: only the http data is returned.

    Args:
        contact_type_id (str): contactTypeId of the record to update.
        body (str): Updated contact type name.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        None
    """
    # Blocking call that discards status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['contact_type_id'] = contact_type_id
    kwargs['body'] = body
    return self.update_contact_type_endpoint.call_with_http_info(**kwargs)
def update_contact_type_with_http_info(self, contact_type_id, body, **kwargs) -> typing.Tuple[None, int, typing.MutableMapping]:
    """Edit a contact type # noqa: E501

    Synchronous variant: returns (http data, http status code, headers).

    Args:
        contact_type_id (str): contactTypeId of the record to update.
        body (str): Updated contact type name.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        tuple: (None, http status code, dict of response headers)
    """
    # Blocking call keeping the full (data, status, headers) triple.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['contact_type_id'] = contact_type_id
    kwargs['body'] = body
    return self.update_contact_type_endpoint.call_with_http_info(**kwargs)
def update_contact_type_async(self, contact_type_id, body, **kwargs) -> "ApplyResult[None]":
    """Edit a contact type # noqa: E501

    Asynchronous variant: only the http data is returned, wrapped in an
    ApplyResult.

    Args:
        contact_type_id (str): contactTypeId of the record to update.
        body (str): Updated contact type name.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[None]
    """
    # Async call that discards status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['contact_type_id'] = contact_type_id
    kwargs['body'] = body
    return self.update_contact_type_endpoint.call_with_http_info(**kwargs)
def update_contact_type_with_http_info_async(self, contact_type_id, body, **kwargs) -> "ApplyResult[typing.Tuple[None, int, typing.MutableMapping]]":
    """Edit a contact type # noqa: E501

    Asynchronous variant: the http data, the http status code and the
    response headers are returned together, wrapped in an ApplyResult.

    Args:
        contact_type_id (str): contactTypeId of the record to update.
        body (str): Updated contact type name.

    Keyword Args:
        Standard request options are honoured: _preload_content,
        _request_timeout, _check_input_type, _check_return_type,
        _spec_property_naming, _content_type and _host_index.

    Returns:
        ApplyResult[(None, int, typing.Dict)]
    """
    # Async call keeping the full (data, status, headers) triple.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['contact_type_id'] = contact_type_id
    kwargs['body'] = body
    return self.update_contact_type_endpoint.call_with_http_info(**kwargs)
| 42.411243
| 121
| 0.566167
| 4,756
| 43,005
| 4.955845
| 0.046468
| 0.036657
| 0.026474
| 0.025796
| 0.934663
| 0.918583
| 0.916377
| 0.911837
| 0.904115
| 0.902673
| 0
| 0.003769
| 0.370771
| 43,005
| 1,013
| 122
| 42.45311
| 0.867258
| 0.568794
| 0
| 0.694377
| 1
| 0
| 0.160036
| 0.038186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04401
| false
| 0
| 0.02445
| 0
| 0.110024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9fbdb5a7651e603942e32a197a0e2c5cc010f39
| 7,832
|
py
|
Python
|
apps/secure_url/tests/tests_secured_entity_model.py
|
fryta/sercure-url
|
06029e8e3a95616f939f62f04c260d14d128f0b4
|
[
"MIT"
] | null | null | null |
apps/secure_url/tests/tests_secured_entity_model.py
|
fryta/sercure-url
|
06029e8e3a95616f939f62f04c260d14d128f0b4
|
[
"MIT"
] | 7
|
2020-02-11T23:49:48.000Z
|
2022-01-13T01:05:42.000Z
|
apps/secure_url/tests/tests_secured_entity_model.py
|
fryta/secure-url
|
06029e8e3a95616f939f62f04c260d14d128f0b4
|
[
"MIT"
] | null | null | null |
import tempfile
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls.base import reverse
from ..constants import SecuredEntityTypes
from ..models import SecuredEntity
class SecuredEntityModelTest(TestCase):
    """Model-level tests for SecuredEntity, covering URL-backed and
    file-backed entities symmetrically."""

    def setUp(self):
        # One owner plus a sample URL and a sample file to secure.
        self.user = get_user_model().objects.create_user(username='test',
                                                         password='123qweasd')
        self.url = 'https://www.facebook.com/'
        self.tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')
        # Close (and remove) the OS-level temp file even when a test
        # fails; the original fixture leaked the handle until GC.
        self.addCleanup(self.tmp_file.close)

    # ------------------------------------------------------------ helpers

    def _saved_entity(self, **fields):
        """Create, save and return a SecuredEntity owned by ``self.user``."""
        entity = SecuredEntity(user=self.user, **fields)
        entity.save()
        return entity

    def _backdated(self, entity, offset):
        """Reload ``entity`` with ``created`` pushed back to the
        accessibility boundary shifted by ``offset`` (a positive offset
        keeps the entity accessible)."""
        SecuredEntity.objects.filter(pk=entity.pk).update(
            created=entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + offset)
        return SecuredEntity.objects.get(pk=entity.pk)

    # --------------------------------------------------- URL-backed tests

    def test_new_model_instance_has_proper_type__url(self):
        self.assertEqual(self._saved_entity(url=self.url).type,
                         SecuredEntityTypes.LINK)

    def test_new_model_instance_has_proper_password_salt__url(self):
        self.assertEqual(len(self._saved_entity(url=self.url).password_salt), 32)

    def test_new_model_instance_has_proper_password__url(self):
        self.assertEqual(len(self._saved_entity(url=self.url).password), 12)

    def test_new_model_instance_has_empty_file_and_not_empty_url__url(self):
        secured_entity = self._saved_entity(url=self.url)
        self.assertFalse(secured_entity.file)
        self.assertTrue(secured_entity.url)

    def test_new_model_instance_is_accessible_after_create__url(self):
        self.assertTrue(self._saved_entity(url=self.url).is_accessible)

    def test_new_model_instance_is_accessible_just_before_expire__url(self):
        secured_entity = self._backdated(self._saved_entity(url=self.url),
                                         timedelta(seconds=1))
        self.assertTrue(secured_entity.is_accessible)

    def test_new_model_instance_is_not_accessible_just_after_expire__url(self):
        secured_entity = self._backdated(self._saved_entity(url=self.url),
                                         -timedelta(seconds=1))
        self.assertFalse(secured_entity.is_accessible)

    def test_new_model_instance_generates_proper_absolute_url__url(self):
        secured_entity = self._saved_entity(url=self.url)
        self.assertEqual(secured_entity.get_absolute_url(),
                         reverse('secure_url:secured-entity-detail-view',
                                 kwargs={'pk': secured_entity.pk}))

    def test_new_model_instance_generates_proper_redirect_url__url(self):
        self.assertEqual(self._saved_entity(url=self.url).get_redirect_url(),
                         self.url)

    def test_new_model_instance_regenerates_password_properly__url(self):
        secured_entity = self._saved_entity(url=self.url)
        old_password = secured_entity.password
        secured_entity.regenerate_password()
        self.assertNotEqual(old_password, secured_entity.password)

    # -------------------------------------------------- file-backed tests

    def test_new_model_instance_has_proper_type__file(self):
        self.assertEqual(self._saved_entity(file=self.tmp_file.name).type,
                         SecuredEntityTypes.FILE)

    def test_new_model_instance_has_proper_password_salt__file(self):
        self.assertEqual(
            len(self._saved_entity(file=self.tmp_file.name).password_salt), 32)

    def test_new_model_instance_has_proper_password__file(self):
        self.assertEqual(
            len(self._saved_entity(file=self.tmp_file.name).password), 12)

    def test_new_model_instance_has_empty_url_and_not_empty_file__file(self):
        secured_entity = self._saved_entity(file=self.tmp_file.name)
        self.assertTrue(secured_entity.file)
        self.assertFalse(secured_entity.url)

    def test_new_model_instance_is_accessible_after_create__file(self):
        self.assertTrue(self._saved_entity(file=self.tmp_file.name).is_accessible)

    def test_new_model_instance_is_accessible_just_before_expire__file(self):
        secured_entity = self._backdated(
            self._saved_entity(file=self.tmp_file.name), timedelta(seconds=1))
        self.assertTrue(secured_entity.is_accessible)

    def test_new_model_instance_is_not_accessible_just_after_expire__file(self):
        secured_entity = self._backdated(
            self._saved_entity(file=self.tmp_file.name), -timedelta(seconds=1))
        self.assertFalse(secured_entity.is_accessible)

    def test_new_model_instance_generates_proper_absolute_url__file(self):
        secured_entity = self._saved_entity(file=self.tmp_file.name)
        self.assertEqual(secured_entity.get_absolute_url(),
                         reverse('secure_url:secured-entity-detail-view',
                                 kwargs={'pk': secured_entity.pk}))

    def test_new_model_instance_generates_proper_redirect_url__file(self):
        self.assertEqual(
            self._saved_entity(file=self.tmp_file.name).get_redirect_url(),
            '{}{}'.format(settings.MEDIA_URL.rstrip('/'), self.tmp_file.name))

    def test_new_model_instance_regenerates_password_properly__file(self):
        secured_entity = self._saved_entity(file=self.tmp_file.name)
        old_password = secured_entity.password
        secured_entity.regenerate_password()
        self.assertNotEqual(old_password, secured_entity.password)
| 40.580311
| 108
| 0.661389
| 870
| 7,832
| 5.581609
| 0.103448
| 0.240939
| 0.128501
| 0.061779
| 0.876647
| 0.876647
| 0.87603
| 0.87603
| 0.84061
| 0.813427
| 0
| 0.002587
| 0.259704
| 7,832
| 192
| 109
| 40.791667
| 0.834943
| 0
| 0
| 0.676471
| 0
| 0
| 0.01596
| 0.009448
| 0
| 0
| 0
| 0
| 0.161765
| 1
| 0.154412
| false
| 0.132353
| 0.058824
| 0
| 0.220588
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
8a10019ea7edea9d64abaa2ea33aa5ff7766b4f0
| 191
|
py
|
Python
|
stroylux/main/payment/admin.py
|
vladkoblynsky/shop
|
aaf027f4111605772624a868a0243b221b97c857
|
[
"BSD-3-Clause"
] | null | null | null |
stroylux/main/payment/admin.py
|
vladkoblynsky/shop
|
aaf027f4111605772624a868a0243b221b97c857
|
[
"BSD-3-Clause"
] | 7
|
2020-09-19T16:24:46.000Z
|
2022-01-13T03:19:46.000Z
|
stroylux/main/payment/admin.py
|
vladkoblynsky/shop
|
aaf027f4111605772624a868a0243b221b97c857
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Payment, Transaction, PaymentMethod
@admin.register(Payment, Transaction, PaymentMethod)
class ProductAdmin(admin.ModelAdmin):
    """Default admin for the Payment, Transaction and PaymentMethod models.

    NOTE(review): the class name says "Product" but it administers payment
    models — presumably copied from another app; confirm before renaming,
    since the name is part of the module's public interface.
    """
    pass
| 23.875
| 55
| 0.811518
| 21
| 191
| 7.380952
| 0.666667
| 0.232258
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115183
| 191
| 7
| 56
| 27.285714
| 0.91716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
8a14fdae11a88b577d8fecdb4123028e9d29d430
| 4,560
|
py
|
Python
|
tests/Composition/test_Composition__grad_species_mole_fractions.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
tests/Composition/test_Composition__grad_species_mole_fractions.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
tests/Composition/test_Composition__grad_species_mole_fractions.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Composition
####
################################################################################
################################################################################
class Composition(unittest.TestCase):
def test_Composition__grad_species_mole_fractions__allowed_calls(self):
species_mole_fractions = np.random.rand(5,100)
try:
comp = multipy.Composition()
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01)
(n_species, n_observations) = np.shape(gradients)
self.assertTrue(n_species==5)
self.assertTrue(n_observations==100)
except Exception:
self.assertTrue(False)
try:
comp = multipy.Composition()
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=1)
(n_species, n_observations) = np.shape(gradients)
self.assertTrue(n_species==5)
self.assertTrue(n_observations==100)
except Exception:
self.assertTrue(False)
try:
comp = multipy.Composition()
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01, edge_order=2)
(n_species, n_observations) = np.shape(gradients)
self.assertTrue(n_species==5)
self.assertTrue(n_observations==100)
except Exception:
self.assertTrue(False)
################################################################################
################################################################################
def test_Composition__grad_species_mole_fractions__not_allowed_calls(self):
species_mole_fractions = np.random.rand(5,)
comp = multipy.Composition()
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01)
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions([1,2,3], delta=0.01)
species_mole_fractions = np.random.rand(5,100)
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=[1])
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta='nones')
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01, edge_order=0)
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01, edge_order=3)
with self.assertRaises(ValueError):
gradients = comp.grad_species_mole_fractions(species_mole_fractions, delta=0.01, edge_order=[1])
################################################################################
################################################################################
def test_Composition__grad_species_mole_fractions__computation(self):
    """Check ``grad_species_mole_fractions`` against analytic derivatives.

    Builds two species profiles, ``x**2`` and ``x**3``, on a uniform grid and
    compares the computed gradient with the exact derivatives ``2*x`` and
    ``3*x**2``.  With first-order edges only interior points are expected to be
    accurate to ~1e-9; with ``edge_order=2`` the boundary points match as well.

    Note: the original version wrapped everything in
    ``try: ... except Exception: self.assertTrue(False)``, which also caught the
    ``AssertionError`` raised by a failing inner assertion and replaced it with a
    messageless failure.  The wrapper is removed so real tracebacks surface, and
    ``assertLess`` is used so failures report the offending values.
    """
    x = np.linspace(1, 2, 100)
    delta_x = x[2] - x[1]  # uniform grid spacing
    quad = x**2
    cub = x**3
    quad_grad = 2*x
    cub_grad = 3*x**2
    data = np.vstack((quad[None, :], cub[None, :]))
    expected_result = np.vstack((quad_grad[None, :], cub_grad[None, :]))
    comp = multipy.Composition()
    # Default (first-order) edges: interior points near machine precision,
    # boundaries only first-order accurate.  Both species rows are checked.
    gradient_data = comp.grad_species_mole_fractions(data, delta_x)
    for row in range(2):
        for i in range(1, 99):
            self.assertLess(abs(gradient_data[row, i] - expected_result[row, i]), 1e-9)
        self.assertLess(abs(gradient_data[row, 0] - expected_result[row, 0]), 0.1)
        self.assertLess(abs(gradient_data[row, -1] - expected_result[row, -1]), 0.1)
    # Second-order edges: every point, including the boundaries, should match.
    gradient_data = comp.grad_species_mole_fractions(data, delta_x, edge_order=2)
    for row in range(2):
        for i in range(100):
            self.assertLess(abs(gradient_data[row, i] - expected_result[row, i]), 1e-9)
################################################################################
################################################################################
| 42.616822
| 108
| 0.533114
| 454
| 4,560
| 5.085903
| 0.140969
| 0.128627
| 0.233867
| 0.155912
| 0.845821
| 0.831096
| 0.80511
| 0.750541
| 0.733651
| 0.718926
| 0
| 0.026243
| 0.20614
| 4,560
| 106
| 109
| 43.018868
| 0.611602
| 0.003947
| 0
| 0.589041
| 0
| 0
| 0.001342
| 0
| 0
| 0
| 0
| 0
| 0.30137
| 1
| 0.041096
| false
| 0
| 0.041096
| 0
| 0.09589
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a74f9e9bff9958655e6c64a71b270a4a10f8b2c
| 747
|
py
|
Python
|
tests/parser/ancestor.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/ancestor.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/ancestor.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser round-trip fixture: a Datalog/ASP "ancestor" program.  Marriage is
# symmetric over husband_of/wife_of, parenthood follows from father_of,
# mother_of, or being married to a parent, and ancestor is the transitive
# closure of parent.  `output` is identical to `input`, so the test presumably
# expects the parser to reproduce the program verbatim.
# NOTE(review): the name `input` shadows the Python builtin — harmless here,
# but worth renaming if this fixture ever grows logic.
input = """
married(X,Y) :- husband_of(X,Y).
married(Y,X) :- husband_of(X,Y).
married(X,Y) :- wife_of(X,Y).
married(Y,X) :- wife_of(X,Y).
parent(X,Y) :- father_of(X,Y).
parent(X,Y) :- mother_of(X,Y).
parent(X,Y) :- married(X,Z), father_of(Z,Y).
parent(X,Y) :- married(X,Z), mother_of(Z,Y).
ancestor(X,Y) :- parent(X,Y).
ancestor(X,Y) :- ancestor(X,U), ancestor(U,Y).
"""
output = """
married(X,Y) :- husband_of(X,Y).
married(Y,X) :- husband_of(X,Y).
married(X,Y) :- wife_of(X,Y).
married(Y,X) :- wife_of(X,Y).
parent(X,Y) :- father_of(X,Y).
parent(X,Y) :- mother_of(X,Y).
parent(X,Y) :- married(X,Z), father_of(Z,Y).
parent(X,Y) :- married(X,Z), mother_of(Z,Y).
ancestor(X,Y) :- parent(X,Y).
ancestor(X,Y) :- ancestor(X,U), ancestor(U,Y).
"""
| 25.758621
| 46
| 0.603748
| 156
| 747
| 2.788462
| 0.089744
| 0.137931
| 0.110345
| 0.206897
| 0.974713
| 0.974713
| 0.974713
| 0.974713
| 0.974713
| 0.974713
| 0
| 0
| 0.104418
| 747
| 28
| 47
| 26.678571
| 0.650224
| 0
| 0
| 0.916667
| 0
| 0
| 0.958501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8aab6cfc5942919e91a614b86494a5015efefe2c
| 68,663
|
py
|
Python
|
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_leslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_leslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_leslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0651675,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.253874,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.337578,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.1891,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.327453,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.187804,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.704358,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.135163,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.74045,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0637757,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00685504,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0745517,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0506972,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.138327,
'Execution Unit/Register Files/Runtime Dynamic': 0.0575522,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.198001,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.52265,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.94381,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000473479,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000473479,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000409802,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00015722,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000728269,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00208503,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0046325,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0487365,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.10006,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.135894,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.165531,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.47133,
'Instruction Fetch Unit/Runtime Dynamic': 0.356879,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.108484,
'L2/Runtime Dynamic': 0.0316228,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.14656,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.970754,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0617749,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.061775,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.43946,
'Load Store Unit/Runtime Dynamic': 1.33718,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.152326,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.304653,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0540612,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0556718,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.19275,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0223336,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.44471,
'Memory Management Unit/Runtime Dynamic': 0.0780054,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 19.7661,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.222499,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0123469,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0951093,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.329955,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.07746,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0277673,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224498,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.14255,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0699257,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.112788,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0569314,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.239645,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0581209,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.19185,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0269307,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.002933,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0319061,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0216913,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0588368,
'Execution Unit/Register Files/Runtime Dynamic': 0.0246243,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0741606,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.196406,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09055,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000210663,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000210663,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000183811,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.1333e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000311598,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000916735,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00200827,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0208524,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.32639,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0576799,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0708242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.60928,
'Instruction Fetch Unit/Runtime Dynamic': 0.152282,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0451777,
'L2/Runtime Dynamic': 0.0132532,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.06334,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.419427,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0267302,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0267302,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.18957,
'Load Store Unit/Runtime Dynamic': 0.577981,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0659122,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.131824,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0233924,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0240613,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0824701,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00948431,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.278763,
'Memory Management Unit/Runtime Dynamic': 0.0335456,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.9041,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0708425,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.004017,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0345831,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.109443,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.97705,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0283227,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224934,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.146998,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0708216,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.114233,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0576608,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.242715,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.058463,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.19953,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.027771,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00297058,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0323268,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0219692,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0600978,
'Execution Unit/Register Files/Runtime Dynamic': 0.0249398,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0751857,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.201665,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09963,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000192217,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000192217,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00016776,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.51278e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00031559,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000867784,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00183086,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0211196,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.34339,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0568315,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0717316,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.6271,
'Instruction Fetch Unit/Runtime Dynamic': 0.152381,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0456816,
'L2/Runtime Dynamic': 0.0117516,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.10382,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.43695,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0280398,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0280399,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.23623,
'Load Store Unit/Runtime Dynamic': 0.603273,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0691415,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.138283,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0245385,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0252131,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0835269,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0093509,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.281789,
'Memory Management Unit/Runtime Dynamic': 0.034564,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.9798,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0730533,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00408432,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0350311,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.112169,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.01377,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0257683,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.222928,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.132156,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0653942,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.105478,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0532419,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.224115,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0545298,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.16628,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0249672,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00274293,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0297665,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0202856,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0547336,
'Execution Unit/Register Files/Runtime Dynamic': 0.0230285,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0691531,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.186581,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.06203,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00017651,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00017651,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000154036,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 5.97916e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000291405,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00079846,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00168179,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0195011,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.24044,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.051339,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0662345,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.51915,
'Instruction Fetch Unit/Runtime Dynamic': 0.139555,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0441751,
'L2/Runtime Dynamic': 0.0114216,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.04701,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.408807,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0262019,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0262019,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.17074,
'Load Store Unit/Runtime Dynamic': 0.564227,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0646094,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.129219,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0229301,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0235835,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0771258,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00844603,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.272624,
'Memory Management Unit/Runtime Dynamic': 0.0320295,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.7624,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0656771,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00374968,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0324145,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.101841,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.9111,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 8.578190758719012,
'Runtime Dynamic': 8.578190758719012,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.385642,
'Runtime Dynamic': 0.146137,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 61.7981,
'Peak Power': 94.9104,
'Runtime Dynamic': 10.1255,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 61.4125,
'Total Cores/Runtime Dynamic': 9.97939,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.385642,
'Total L3s/Runtime Dynamic': 0.146137,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.123632
| 124
| 0.682289
| 8,085
| 68,663
| 5.788497
| 0.067285
| 0.123419
| 0.112821
| 0.093333
| 0.939167
| 0.930684
| 0.918312
| 0.887265
| 0.862265
| 0.841603
| 0
| 0.132715
| 0.224153
| 68,663
| 914
| 125
| 75.123632
| 0.745795
| 0
| 0
| 0.642232
| 0
| 0
| 0.656894
| 0.04806
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76dc8f520fd7c8a220895dafa224af379640bff4
| 36,328
|
py
|
Python
|
tests/test_base.py
|
bgorr/instrupy
|
e3dca871ce2dcd2ef279898fcc36bf9d18f0c243
|
[
"Apache-2.0"
] | null | null | null |
tests/test_base.py
|
bgorr/instrupy
|
e3dca871ce2dcd2ef279898fcc36bf9d18f0c243
|
[
"Apache-2.0"
] | null | null | null |
tests/test_base.py
|
bgorr/instrupy
|
e3dca871ce2dcd2ef279898fcc36bf9d18f0c243
|
[
"Apache-2.0"
] | null | null | null |
"""Unit tests for instrupy.base.
"""
import unittest
import numpy as np
import random
from deepdiff import DeepDiff
from instrupy import InstrumentModelFactory, Instrument
from instrupy.basic_sensor_model import BasicSensorModel
from instrupy.passive_optical_scanner_model import PassiveOpticalScannerModel
from instrupy.synthetic_aperture_radar_model import SyntheticApertureRadarModel
from instrupy.radiometer_model import RadiometerModel
from instrupy.util import SphericalGeometry, Orientation, ViewGeometry, Maneuver
class TestInstrumentModelFactory(unittest.TestCase):
    """Unit tests for instrupy.InstrumentModelFactory.

    Verifies that the factory ships with the built-in instrument models
    pre-registered, that user-defined models can be added, and that the
    correct model class is produced for a given specification dictionary.
    """

    class DummyNewInstrument:
        """Minimal stand-in instrument model used to exercise user registration."""

        def __init__(self, *args, **kwargs):
            pass

        def from_dict(self):
            return TestInstrumentModelFactory.DummyNewInstrument()

    def _assert_builtin_models_registered(self, factory):
        """Assert that every built-in instrument model is present in the factory registry."""
        expected = {
            'Basic Sensor': BasicSensorModel,
            'Passive Optical Scanner': PassiveOpticalScannerModel,
            'Synthetic Aperture Radar': SyntheticApertureRadarModel,
            'Radiometer': RadiometerModel,
        }
        for label, model_cls in expected.items():
            self.assertIn(label, factory._creators)
            self.assertEqual(factory._creators[label], model_cls)

    def test___init__(self):
        # A freshly constructed factory must already know the built-in models.
        self._assert_builtin_models_registered(InstrumentModelFactory())

    def test_register_instrument_model(self):
        factory = InstrumentModelFactory()
        dummy = TestInstrumentModelFactory.DummyNewInstrument
        factory.register_instrument_model('New Instrument 2021', dummy)
        self.assertIn('New Instrument 2021', factory._creators)
        self.assertEqual(factory._creators['New Instrument 2021'], dummy)
        # Registering a new model must not displace the built-in ones.
        self._assert_builtin_models_registered(factory)

    def test_get_instrument_model(self):
        factory = InstrumentModelFactory()
        # register a dummy instrument model alongside the built-ins
        factory.register_instrument_model('New Instrument 2021', TestInstrumentModelFactory.DummyNewInstrument)
        # Each spec dict would, in practice, carry the full instrument specification;
        # only the fields needed for model selection/construction are given here.
        cases = [
            ({"@type": 'Basic Sensor'}, BasicSensorModel),
            ({"@type": 'Passive Optical Scanner', 'scanTechnique': 'PUSHBROOM',
              "numberDetectorRows": 1, "numberDetectorCols": 500,
              "fieldOfViewGeometry": {"shape": "rectangular", "angleWidth": 5, "angleHeight": 5}},
             PassiveOpticalScannerModel),
            ({"@type": 'Synthetic Aperture Radar', "minimumPRF": 2000, "maximumPRF": 8000,
              "operatingFrequency": 9.6e9, "antennaHeight": 5, "antennaWidth": 0.5},
             SyntheticApertureRadarModel),
            ({"@type": 'Radiometer'}, RadiometerModel),
            ({"@type": 'New Instrument 2021'}, TestInstrumentModelFactory.DummyNewInstrument),
        ]
        for specs, expected_cls in cases:
            self.assertIsInstance(factory.get_instrument_model(specs), expected_cls)
class TestInstrument(unittest.TestCase):
    """Unit tests for the instrupy.Instrument wrapper class.

    Three 'Basic Sensor' instruments serve as fixtures:

    * ``bs1``: no ``mode`` specification -> a single default mode ("0") is created.
    * ``bs2``: a single explicit mode with a user-assigned mode-id (101).
    * ``bs3``: three explicit modes (two user-assigned ids, one auto-generated).
    """
    bs1 = Instrument.from_json('{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
                                 "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
                                 "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":2.5 }, \
                                 "sceneFieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
                                 "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
                                 "pointingOption": [{"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":2.5, "zRotation":0}, \
                                                    {"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":-2.5, "zRotation":0} \
                                                   ], \
                                 "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" \
                                }')
    bs2 = Instrument.from_json('{"name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
                                 "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
                                 "maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, \
                                 "mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} \
                                         ], \
                                 "numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" \
                                }')
    bs3 = Instrument.from_json('{"name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
                                 "fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }, \
                                 "sceneFieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, \
                                 "maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, \
                                 "pointingOption": [{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":10}, \
                                                    {"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":15} \
                                                   ], \
                                 "mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, \
                                          {"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, \
                                          { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} \
                                         ], \
                                 "numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" \
                                }')

    def test_from_json_basic_sensor(self):
        """Validate Instrument construction from JSON for no-mode, single-mode and multi-mode specs."""
        # test initialization with no mode specification
        self.assertEqual(TestInstrument.bs1.name, "Alpha")
        self.assertEqual(TestInstrument.bs1._id, "bs1")
        self.assertEqual(TestInstrument.bs1._type, "Basic Sensor")
        self.assertIsInstance(TestInstrument.bs1, Instrument)
        self.assertEqual(len(TestInstrument.bs1.mode), 1)
        self.assertIsInstance(TestInstrument.bs1.mode[0], BasicSensorModel)
        mode0 = TestInstrument.bs1.mode[0]
        self.assertEqual(mode0._id, "0")  # default mode-id when no mode is specified
        self.assertEqual(mode0.mass, 10)
        self.assertEqual(mode0.volume, 12.45)
        self.assertEqual(mode0.dataRate, 40)
        self.assertEqual(mode0.bitsPerPixel, 8)
        self.assertEqual(mode0.power, 12)
        self.assertEqual(mode0.numberDetectorRows, 5)
        self.assertEqual(mode0.numberDetectorCols, 10)
        self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
                                                                    "sphericalGeometry": {"shape": "CIRCULAR", "diameter":2.5 }}))
        self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
                                                                         "sphericalGeometry": {"shape": "CIRCULAR", "diameter":5 }}))
        self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "CIRCULAR", "diameter": 10}'))
        # CIRCULAR maneuver of diameter 10 about a 5 deg scene fov -> 15 deg circular field of regard
        self.assertEqual(mode0.fieldOfRegard, [ViewGeometry.from_dict({"orientation": {"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"},
                                                                       "sphericalGeometry": {"shape": "CIRCULAR", "diameter":15 }})])
        self.assertEqual(mode0.pointingOption, [Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":2.5, "zRotation":0}),
                                                Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":-2.5, "zRotation":0})])

        # test initialization with single mode specification
        self.assertEqual(TestInstrument.bs2.name, "Beta")
        self.assertIsNotNone(TestInstrument.bs2._id) # a random id shall be assigned
        self.assertEqual(TestInstrument.bs2._type, "Basic Sensor")
        self.assertIsInstance(TestInstrument.bs2, Instrument)
        self.assertEqual(len(TestInstrument.bs2.mode), 1)
        self.assertIsInstance(TestInstrument.bs2.mode[0], BasicSensorModel)
        mode0 = TestInstrument.bs2.mode[0]
        self.assertEqual(mode0._id, 101)
        self.assertEqual(mode0.mass, 10)
        self.assertEqual(mode0.volume, 12.45)
        self.assertEqual(mode0.dataRate, 40)
        self.assertEqual(mode0.bitsPerPixel, 8)
        self.assertEqual(mode0.power, 12)
        self.assertEqual(mode0.numberDetectorRows, 5)
        self.assertEqual(mode0.numberDetectorCols, 10)
        self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
                                                                    "sphericalGeometry":{"shape": "CIRCULAR", "diameter": 5}}))
        # no sceneFieldOfViewGeometry given -> scene fov defaults to the instrument fov
        self.assertEqual(mode0.sceneFieldOfView, mode0.fieldOfView)
        self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "single_ROLL_ONLY", "A_rollMin": 10, "A_rollMax":15}'))
        self.assertIsNone(mode0.pointingOption)

        # test initialization with multiple mode specifications
        self.assertEqual(TestInstrument.bs3.name, "Gamma")
        self.assertEqual(TestInstrument.bs3._id, "bs3")
        self.assertEqual(TestInstrument.bs3._type, "Basic Sensor")
        self.assertIsInstance(TestInstrument.bs3, Instrument)
        self.assertEqual(len(TestInstrument.bs3.mode), 3)
        self.assertIsInstance(TestInstrument.bs3.mode[0], BasicSensorModel)
        # mode0
        mode0 = TestInstrument.bs3.mode[0]
        self.assertEqual(mode0._id, 0)
        self.assertEqual(mode0.mass, 10)
        self.assertEqual(mode0.volume, 12.45)
        self.assertEqual(mode0.dataRate, 40)
        self.assertEqual(mode0.bitsPerPixel, 8)
        self.assertEqual(mode0.power, 12)
        self.assertEqual(mode0.numberDetectorRows, 5)
        self.assertEqual(mode0.numberDetectorCols, 10)
        self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
                                                                    "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}}))
        self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
                                                                         "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}}))
        self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}'))
        ddiff = DeepDiff(mode0.fieldOfRegard,
                         [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}),
                          ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}})
                         ],
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        self.assertEqual(mode0.pointingOption, [Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":10}),
                                                Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":15})])
        # mode1
        # (Fixed: this section previously re-asserted mode0's attributes instead of mode1's,
        #  leaving mode1 untested. Expected values agree with test_get_field_of_view /
        #  test_get_scene_field_of_view / test_get_orientation for mode_id=1.)
        mode1 = TestInstrument.bs3.mode[1]
        self.assertEqual(mode1._id, 1)
        self.assertEqual(mode1.mass, 10)
        self.assertEqual(mode1.volume, 12.45)
        self.assertEqual(mode1.dataRate, 40)
        self.assertEqual(mode1.bitsPerPixel, 8)
        self.assertEqual(mode1.power, 12)
        self.assertEqual(mode1.numberDetectorRows, 5)
        self.assertEqual(mode1.numberDetectorCols, 10)
        self.assertEqual(mode1.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}'))
        self.assertEqual(mode1.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25},
                                                                    "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}}))
        self.assertEqual(mode1.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25},
                                                                         "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}}))
        self.assertEqual(mode1.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}'))
        # all bs3 modes share the same (maneuver-derived) field of regard
        ddiff = DeepDiff(mode1.fieldOfRegard,
                         [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}),
                          ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}})
                         ],
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        self.assertEqual(mode1.pointingOption, mode0.pointingOption)
        # mode2
        # (Fixed likewise: assertions below are now on mode2, not mode0.)
        mode2 = TestInstrument.bs3.mode[2]
        self.assertIsNotNone(mode2._id)  # no "@id" in the spec -> a random id shall be assigned
        self.assertEqual(mode2.mass, 10)
        self.assertEqual(mode2.volume, 12.45)
        self.assertEqual(mode2.dataRate, 40)
        self.assertEqual(mode2.bitsPerPixel, 8)
        self.assertEqual(mode2.power, 12)
        self.assertEqual(mode2.numberDetectorRows, 5)
        self.assertEqual(mode2.numberDetectorCols, 10)
        self.assertEqual(mode2.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}'))
        self.assertEqual(mode2.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25},
                                                                    "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}}))
        self.assertEqual(mode2.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25},
                                                                         "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}}))
        self.assertEqual(mode2.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}'))
        ddiff = DeepDiff(mode2.fieldOfRegard,
                         [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}),
                          ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5},
                                                  "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}})
                         ],
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        self.assertEqual(mode2.pointingOption, mode0.pointingOption)

    def test_get_type(self):
        self.assertEqual(TestInstrument.bs1.get_type(), 'Basic Sensor')
        self.assertEqual(TestInstrument.bs2.get_type(), 'Basic Sensor')
        self.assertEqual(TestInstrument.bs3.get_type(), 'Basic Sensor')

    def test_get_id(self):
        self.assertEqual(TestInstrument.bs1.get_id(), "bs1")
        self.assertIsNotNone(TestInstrument.bs2.get_id())  # randomly assigned id
        self.assertEqual(TestInstrument.bs3.get_id(), "bs3")

    def test_get_mode_id(self): #@TODO
        pass

    def test_get_mode(self): #@TODO
        pass

    def test_get_field_of_regard(self): #@TODO
        """An unrecognized mode-id shall fall back to the first mode's field of regard."""
        # bs1
        # no input mode-id
        self.assertEqual(TestInstrument.bs1.get_field_of_regard(), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"},
                                                                                            "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})])
        # input correct mode-id
        self.assertEqual(TestInstrument.bs1.get_field_of_regard(mode_id="0"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"},
                                                                                                       "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})])
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs1.get_field_of_regard(mode_id="abc"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"},
                                                                                                         "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})])
        # bs2
        # no input mode-id
        self.assertEqual(TestInstrument.bs2.get_field_of_regard(), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})])
        # input correct mode-id
        ddiff = DeepDiff(TestInstrument.bs2.get_field_of_regard(mode_id=101), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})], ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {})
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs2.get_field_of_regard(mode_id="abc"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})])
        # bs3, all modes have the same field of regard
        # no input mode-id
        ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard()[0],
                         ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}),
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard()[1],
                         ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": -12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}),
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        # input correct mode-id
        ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard(mode_id=0)[0],
                         ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}),
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard(mode_id=0)[1],
                         ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": -12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}),
                         significant_digits=7, ignore_numeric_type_changes=True)
        self.assertEqual(ddiff, {}, msg=ddiff)
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id='abc'), TestInstrument.bs3.get_field_of_regard(mode_id=0))
        # next mode
        self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id=1), TestInstrument.bs3.get_field_of_regard(mode_id=0))
        # next mode,
        mode_id = TestInstrument.bs3.mode_id[2]
        self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id=mode_id), TestInstrument.bs3.get_field_of_regard(mode_id=0))

    def test_get_field_of_view(self):
        """The field of view carries the mode's orientation and the instrument fov geometry."""
        # bs1
        # no input mode-id
        self.assertEqual(TestInstrument.bs1.get_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs1.get_field_of_view(mode_id="0"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs1.get_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}}))
        # bs2
        # no input mode-id
        self.assertEqual(TestInstrument.bs2.get_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs2.get_field_of_view(mode_id=101), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs2.get_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # bs3
        # no input mode-id
        self.assertEqual(TestInstrument.bs3.get_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs3.get_field_of_view(mode_id=0), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs3.get_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }}))
        # next mode
        self.assertEqual(TestInstrument.bs3.get_field_of_view(mode_id=1), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }}))
        # next mode
        mode_id = TestInstrument.bs3.mode_id[2]
        self.assertEqual(TestInstrument.bs3.get_field_of_view(mode_id=mode_id), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }}))

    def test_get_scene_field_of_view(self):
        """The scene fov defaults to the instrument fov when no sceneFieldOfViewGeometry is given (bs2)."""
        # bs1
        # no input mode-id
        self.assertEqual(TestInstrument.bs1.get_scene_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs1.get_scene_field_of_view(mode_id="0"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs1.get_scene_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # bs2
        # no input mode-id
        self.assertEqual(TestInstrument.bs2.get_scene_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs2.get_scene_field_of_view(mode_id=101), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs2.get_scene_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 5}}))
        # bs3
        # no input mode-id
        self.assertEqual(TestInstrument.bs3.get_scene_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }}))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs3.get_scene_field_of_view(mode_id=0), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }}))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs3.get_scene_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }}))
        # next mode
        self.assertEqual(TestInstrument.bs3.get_scene_field_of_view(mode_id=1), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }}))
        # next mode
        mode_id = TestInstrument.bs3.mode_id[2]
        self.assertEqual(TestInstrument.bs3.get_scene_field_of_view(mode_id=mode_id), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }}))

    def test_get_orientation(self):
        # bs1
        # no input mode-id
        self.assertEqual(TestInstrument.bs1.get_orientation(), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs1.get_orientation(mode_id="0"), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs1.get_orientation(mode_id="abc"), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # bs2
        # no input mode-id
        self.assertEqual(TestInstrument.bs2.get_orientation(), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs2.get_orientation(mode_id=101), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs2.get_orientation(mode_id="abc"), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # bs3
        # no input mode-id
        self.assertEqual(TestInstrument.bs3.get_orientation(), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input correct mode-id
        self.assertEqual(TestInstrument.bs3.get_orientation(mode_id=0), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # input incorrect mode-id, should default to first mode
        self.assertEqual(TestInstrument.bs3.get_orientation(mode_id="abc"), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}'))
        # next mode
        self.assertEqual(TestInstrument.bs3.get_orientation(mode_id=1), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}'))
        # next mode
        mode_id = TestInstrument.bs3.mode_id[2]
        self.assertEqual(TestInstrument.bs3.get_orientation(mode_id=mode_id), Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}'))

    def test_pixel_config(self): #@TODO
        pass

    def test_calc_data_metrics_bs1(self): #@TODO
        """ Simple test involving satellite above POI at (lat = 0,lon = 0). Date chosen so that ECEF and ECI frames are aligned.
        Sensor specs do not influence the below calcs. They do however shall influence the coverage calcs (which is not covered by this test).
        """
        epoch_JDUT1 = 2458543.06088 # 2019 Feb 28 13:27:40 is time at which the ECEF and ECI frames approximately align, hence ECEF to ECI rotation is identity. See <https://www.celnav.de/longterm.htm> online calculator of GMST.
        SpacecraftOrbitState = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 6878.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.6126, 'vz [km/s]': 0} # altitude 500 km
        TargetCoords = {'lat [deg]': 0, 'lon [deg]': 0}
        # The data metrics must be the same whether the mode-id is omitted, valid, or invalid
        # (an invalid id falls back to the first mode).
        for mode_id in (None, "0", "abc"):
            obsv_metrics = TestInstrument.bs1.calc_data_metrics(mode_id, SpacecraftOrbitState, TargetCoords)
            self.assertAlmostEqual(obsv_metrics["observation range [km]"], 500, delta = 1)
            self.assertAlmostEqual(obsv_metrics["incidence angle [deg]"], 0, delta = 0.1)
            self.assertAlmostEqual(obsv_metrics["look angle [deg]"], 0, delta = 0.1)
            self.assertAlmostEqual(obsv_metrics["solar zenith [deg]"], 20.335, delta = 0.1) # precomputed value at the epoch and (lat=0, lon=0) position

    def test_synthesize_observation(self): #@TODO
        pass

    def test_get_pointing_option(self): #@TODO
        pass
| 79.318777
| 322
| 0.650655
| 3,688
| 36,328
| 6.215835
| 0.08026
| 0.089644
| 0.068313
| 0.055619
| 0.838074
| 0.804179
| 0.778573
| 0.768409
| 0.742322
| 0.714579
| 0
| 0.027149
| 0.211159
| 36,328
| 457
| 323
| 79.492341
| 0.772795
| 0.081232
| 0
| 0.41875
| 0
| 0.053125
| 0.292099
| 0.001444
| 0
| 0
| 0
| 0.004376
| 0.540625
| 1
| 0.05625
| false
| 0.040625
| 0.03125
| 0.003125
| 0.109375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76ea7cd965e12ce48fdc6aecacf01b4fc0b9826d
| 10,120
|
py
|
Python
|
ui_data/gui_study_3.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | 1
|
2020-03-22T14:35:11.000Z
|
2020-03-22T14:35:11.000Z
|
ui_data/gui_study_3.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | null | null | null |
ui_data/gui_study_3.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'study_3.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Form class for the 'study_3.ui' dialog (originally emitted by pyuic5 5.11.3).

    NOTE(review): the generated code built the button palette with ~140
    repetitive brush lines; that is replaced here by a data-driven helper
    producing the identical QPalette (Active and Inactive color groups were
    byte-for-byte the same in the generated output). If the .ui file is
    regenerated, these hand edits will be overwritten.
    """

    @staticmethod
    def _build_palette():
        """Return the QPalette the generated code built brush-by-brush.

        The Active and Inactive groups share one color table; the Disabled
        group overrides the three text roles (darkened) and AlternateBase.
        """
        R = QtGui.QPalette
        # Colors for the Active and Inactive groups, keyed by color role.
        shared = {
            R.WindowText: (0, 0, 0),
            R.Button: (42, 234, 255),
            R.Light: (224, 159, 134),
            R.Midlight: (186, 132, 111),
            R.Dark: (74, 53, 44),
            R.Mid: (99, 70, 59),
            R.Text: (0, 0, 0),
            R.BrightText: (255, 255, 255),
            R.ButtonText: (0, 0, 0),
            R.Base: (42, 234, 255),
            R.Window: (42, 234, 255),
            R.Shadow: (0, 0, 0),
            R.AlternateBase: (202, 180, 172),
            R.ToolTipBase: (255, 255, 220),
            R.ToolTipText: (0, 0, 0),
        }
        disabled = dict(shared)
        disabled.update({
            R.WindowText: (74, 53, 44),
            R.Text: (74, 53, 44),
            R.ButtonText: (74, 53, 44),
            R.AlternateBase: (149, 106, 89),
        })
        palette = QtGui.QPalette()
        for group, colors in ((R.Active, shared),
                              (R.Inactive, shared),
                              (R.Disabled, disabled)):
            for role, rgb in colors.items():
                brush = QtGui.QBrush(QtGui.QColor(*rgb))
                brush.setStyle(QtCore.Qt.SolidPattern)
                palette.setBrush(group, role, brush)
        return palette

    def setupUi(self, Dialog):
        """Build the dialog: a large image push button plus a second plain
        push button positioned on top of it.

        :param Dialog: the QDialog instance to populate.
        """
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(50, 60, 241, 181))
        self.pushButton.setPalette(self._build_palette())
        self.pushButton.setAutoFillBackground(False)
        # Stylesheet colors/paths come from the .ui file; kept verbatim.
        self.pushButton.setStyleSheet("background-color: rgb(42, 234, 255);\n"
                                      "image: url(qt_resource/Biological_Treatment.png);")
        self.pushButton.setInputMethodHints(QtCore.Qt.ImhNone)
        icon = QtGui.QIcon()
        # Relative icon path recorded by Qt Designer; kept verbatim.
        icon.addPixmap(QtGui.QPixmap("../../../도장.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon)
        self.pushButton.setIconSize(QtCore.QSize(30, 30))
        self.pushButton.setCheckable(False)
        self.pushButton.setAutoRepeat(False)
        self.pushButton.setAutoDefault(False)
        self.pushButton.setDefault(True)
        self.pushButton.setFlat(False)
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Dialog)
        self.pushButton_2.setGeometry(QtCore.QRect(160, 120, 75, 23))
        self.pushButton_2.setObjectName("pushButton_2")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Assign all user-visible strings via Qt's translation machinery."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "PushButton"))
        self.pushButton_2.setText(_translate("Dialog", "PushButton"))
| 56.222222
| 93
| 0.694664
| 1,196
| 10,120
| 5.867057
| 0.121237
| 0.168591
| 0.102608
| 0.134673
| 0.824426
| 0.813026
| 0.813026
| 0.813026
| 0.813026
| 0.802622
| 0
| 0.041843
| 0.180534
| 10,120
| 179
| 94
| 56.536313
| 0.804293
| 0.017885
| 0
| 0.529762
| 1
| 0
| 0.017519
| 0.004229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.005952
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a069f77d031de0acacff4e0462c0bf5f0016c7c
| 30,959
|
py
|
Python
|
python/Model_Files/LFV_2/decays.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | 1
|
2019-10-21T08:25:46.000Z
|
2019-10-21T08:25:46.000Z
|
python/Model_Files/LFV_2/decays.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
python/Model_Files/LFV_2/decays.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
# This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:44:59
from object_library import all_decays, Decay
import particles as P
# Auto-generated decay table: each Decay object maps a decaying particle to a
# dict of two-body final states -> analytic partial-width expressions. The
# expressions are *strings* in the model's external parameters (masses, CKM
# elements, couplings); they are evaluated later by the UFO machinery, not
# here. Do not hand-edit the formulas — regenerate from FeynRules instead.
# NOTE(review): in the Decay_fi / lepton->fi channels the numeric constants
# (10000, 400, 200, 50, 16000.) look like the fi scalar mass (~10) already
# substituted in by the generator — confirm against the model file.

# b quark: charged-current decays b -> W- + up-type quark (CKM 3rd column).
Decay_b = Decay(name = 'Decay_b',
particle = P.b,
partial_widths = {(P.W__minus__,P.c):'(((3*CKM2x3*ee**2*MB**2*complexconjugate(CKM2x3))/(2.*sw**2) + (3*CKM2x3*ee**2*MC**2*complexconjugate(CKM2x3))/(2.*sw**2) + (3*CKM2x3*ee**2*MB**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) - (3*CKM2x3*ee**2*MB**2*MC**2*complexconjugate(CKM2x3))/(MW**2*sw**2) + (3*CKM2x3*ee**2*MC**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) - (3*CKM2x3*ee**2*MW**2*complexconjugate(CKM2x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MC**2 + MC**4 - 2*MB**2*MW**2 - 2*MC**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MB)**3)',
(P.W__minus__,P.t):'(((3*CKM3x3*ee**2*MB**2*complexconjugate(CKM3x3))/(2.*sw**2) + (3*CKM3x3*ee**2*MT**2*complexconjugate(CKM3x3))/(2.*sw**2) + (3*CKM3x3*ee**2*MB**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) - (3*CKM3x3*ee**2*MB**2*MT**2*complexconjugate(CKM3x3))/(MW**2*sw**2) + (3*CKM3x3*ee**2*MT**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) - (3*CKM3x3*ee**2*MW**2*complexconjugate(CKM3x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MB)**3)',
(P.W__minus__,P.u):'(((3*CKM1x3*ee**2*MB**2*complexconjugate(CKM1x3))/(2.*sw**2) + (3*CKM1x3*ee**2*MU**2*complexconjugate(CKM1x3))/(2.*sw**2) + (3*CKM1x3*ee**2*MB**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) - (3*CKM1x3*ee**2*MB**2*MU**2*complexconjugate(CKM1x3))/(MW**2*sw**2) + (3*CKM1x3*ee**2*MU**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) - (3*CKM1x3*ee**2*MW**2*complexconjugate(CKM1x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MU**2 + MU**4 - 2*MB**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MB)**3)'})
# c quark: c -> W+ + down-type quark (CKM 2nd row).
Decay_c = Decay(name = 'Decay_c',
particle = P.c,
partial_widths = {(P.W__plus__,P.b):'(((3*CKM2x3*ee**2*MB**2*complexconjugate(CKM2x3))/(2.*sw**2) + (3*CKM2x3*ee**2*MC**2*complexconjugate(CKM2x3))/(2.*sw**2) + (3*CKM2x3*ee**2*MB**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) - (3*CKM2x3*ee**2*MB**2*MC**2*complexconjugate(CKM2x3))/(MW**2*sw**2) + (3*CKM2x3*ee**2*MC**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) - (3*CKM2x3*ee**2*MW**2*complexconjugate(CKM2x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MC**2 + MC**4 - 2*MB**2*MW**2 - 2*MC**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MC)**3)',
(P.W__plus__,P.d):'(((3*CKM2x1*ee**2*MC**2*complexconjugate(CKM2x1))/(2.*sw**2) + (3*CKM2x1*ee**2*MD**2*complexconjugate(CKM2x1))/(2.*sw**2) + (3*CKM2x1*ee**2*MC**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) - (3*CKM2x1*ee**2*MC**2*MD**2*complexconjugate(CKM2x1))/(MW**2*sw**2) + (3*CKM2x1*ee**2*MD**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) - (3*CKM2x1*ee**2*MW**2*complexconjugate(CKM2x1))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MD**2 + MD**4 - 2*MC**2*MW**2 - 2*MD**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MC)**3)',
(P.W__plus__,P.s):'(((3*CKM2x2*ee**2*MC**2*complexconjugate(CKM2x2))/(2.*sw**2) + (3*CKM2x2*ee**2*MS**2*complexconjugate(CKM2x2))/(2.*sw**2) + (3*CKM2x2*ee**2*MC**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) - (3*CKM2x2*ee**2*MC**2*MS**2*complexconjugate(CKM2x2))/(MW**2*sw**2) + (3*CKM2x2*ee**2*MS**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) - (3*CKM2x2*ee**2*MW**2*complexconjugate(CKM2x2))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MS**2 + MS**4 - 2*MC**2*MW**2 - 2*MS**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MC)**3)'})
# d quark: d -> W- + up-type quark (CKM 1st column).
Decay_d = Decay(name = 'Decay_d',
particle = P.d,
partial_widths = {(P.W__minus__,P.c):'(((3*CKM2x1*ee**2*MC**2*complexconjugate(CKM2x1))/(2.*sw**2) + (3*CKM2x1*ee**2*MD**2*complexconjugate(CKM2x1))/(2.*sw**2) + (3*CKM2x1*ee**2*MC**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) - (3*CKM2x1*ee**2*MC**2*MD**2*complexconjugate(CKM2x1))/(MW**2*sw**2) + (3*CKM2x1*ee**2*MD**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) - (3*CKM2x1*ee**2*MW**2*complexconjugate(CKM2x1))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MD**2 + MD**4 - 2*MC**2*MW**2 - 2*MD**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MD)**3)',
(P.W__minus__,P.t):'(((3*CKM3x1*ee**2*MD**2*complexconjugate(CKM3x1))/(2.*sw**2) + (3*CKM3x1*ee**2*MT**2*complexconjugate(CKM3x1))/(2.*sw**2) + (3*CKM3x1*ee**2*MD**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) - (3*CKM3x1*ee**2*MD**2*MT**2*complexconjugate(CKM3x1))/(MW**2*sw**2) + (3*CKM3x1*ee**2*MT**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) - (3*CKM3x1*ee**2*MW**2*complexconjugate(CKM3x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MT**2 + MT**4 - 2*MD**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MD)**3)',
(P.W__minus__,P.u):'(((3*CKM1x1*ee**2*MD**2*complexconjugate(CKM1x1))/(2.*sw**2) + (3*CKM1x1*ee**2*MU**2*complexconjugate(CKM1x1))/(2.*sw**2) + (3*CKM1x1*ee**2*MD**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) - (3*CKM1x1*ee**2*MD**2*MU**2*complexconjugate(CKM1x1))/(MW**2*sw**2) + (3*CKM1x1*ee**2*MU**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) - (3*CKM1x1*ee**2*MW**2*complexconjugate(CKM1x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MU**2 + MU**4 - 2*MD**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MD)**3)'})
# electron: lepton-flavour-violating e -> fi + (mu/ta) via the yfIxJ Yukawas,
# plus the standard e -> W- ve channel.
Decay_e__minus__ = Decay(name = 'Decay_e__minus__',
particle = P.e__minus__,
partial_widths = {(P.fi,P.mu__minus__):'((Me*MMU*vev**2*yf1x2*yf2x1 - 50*vev**2*yf1x2*complexconjugate(yf1x2) + (Me**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. + (MMU**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. - 50*vev**2*yf2x1*complexconjugate(yf2x1) + (Me**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. + (MMU**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. + Me*MMU*vev**2*complexconjugate(yf1x2)*complexconjugate(yf2x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MMU**2 - 2*Me**2*MMU**2 + MMU**4))/(32.*cmath.pi*abs(Me)**3)',
(P.fi,P.ta__minus__):'((Me*MTA*vev**2*yf1x3*yf3x1 - 50*vev**2*yf1x3*complexconjugate(yf1x3) + (Me**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. + (MTA**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. - 50*vev**2*yf3x1*complexconjugate(yf3x1) + (Me**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. + (MTA**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. + Me*MTA*vev**2*complexconjugate(yf1x3)*complexconjugate(yf3x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MTA**2 - 2*Me**2*MTA**2 + MTA**4))/(32.*cmath.pi*abs(Me)**3)',
(P.W__minus__,P.ve):'((Me**2 - MW**2)*((ee**2*Me**2)/(2.*sw**2) + (ee**2*Me**4)/(2.*MW**2*sw**2) - (ee**2*MW**2)/sw**2))/(32.*cmath.pi*abs(Me)**3)'})
# fi scalar: decays to all charged-lepton pairs, including the LFV
# off-diagonal combinations (e mu, e ta, mu ta and conjugates).
Decay_fi = Decay(name = 'Decay_fi',
particle = P.fi,
partial_widths = {(P.e__minus__,P.e__plus__):'((-(Me**2*vev**2*yf1x1**2) + 100*vev**2*yf1x1*complexconjugate(yf1x1) - 2*Me**2*vev**2*yf1x1*complexconjugate(yf1x1) - Me**2*vev**2*complexconjugate(yf1x1)**2)*cmath.sqrt(10000 - 400*Me**2))/(16000.*cmath.pi)',
(P.e__minus__,P.mu__plus__):'((-(Me*MMU*vev**2*yf1x2*yf2x1) + 50*vev**2*yf1x2*complexconjugate(yf1x2) - (Me**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. - (MMU**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. + 50*vev**2*yf2x1*complexconjugate(yf2x1) - (Me**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. - (MMU**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. - Me*MMU*vev**2*complexconjugate(yf1x2)*complexconjugate(yf2x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MMU**2 - 2*Me**2*MMU**2 + MMU**4))/(16000.*cmath.pi)',
(P.e__minus__,P.ta__plus__):'((-(Me*MTA*vev**2*yf1x3*yf3x1) + 50*vev**2*yf1x3*complexconjugate(yf1x3) - (Me**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. - (MTA**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. + 50*vev**2*yf3x1*complexconjugate(yf3x1) - (Me**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. - (MTA**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. - Me*MTA*vev**2*complexconjugate(yf1x3)*complexconjugate(yf3x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MTA**2 - 2*Me**2*MTA**2 + MTA**4))/(16000.*cmath.pi)',
(P.mu__minus__,P.e__plus__):'((-(Me*MMU*vev**2*yf1x2*yf2x1) + 50*vev**2*yf1x2*complexconjugate(yf1x2) - (Me**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. - (MMU**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. + 50*vev**2*yf2x1*complexconjugate(yf2x1) - (Me**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. - (MMU**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. - Me*MMU*vev**2*complexconjugate(yf1x2)*complexconjugate(yf2x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MMU**2 - 2*Me**2*MMU**2 + MMU**4))/(16000.*cmath.pi)',
(P.mu__minus__,P.mu__plus__):'((-(MMU**2*vev**2*yf2x2**2) + 100*vev**2*yf2x2*complexconjugate(yf2x2) - 2*MMU**2*vev**2*yf2x2*complexconjugate(yf2x2) - MMU**2*vev**2*complexconjugate(yf2x2)**2)*cmath.sqrt(10000 - 400*MMU**2))/(16000.*cmath.pi)',
(P.mu__minus__,P.ta__plus__):'((-(MMU*MTA*vev**2*yf2x3*yf3x2) + 50*vev**2*yf2x3*complexconjugate(yf2x3) - (MMU**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. - (MTA**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. + 50*vev**2*yf3x2*complexconjugate(yf3x2) - (MMU**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. - (MTA**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. - MMU*MTA*vev**2*complexconjugate(yf2x3)*complexconjugate(yf3x2))*cmath.sqrt(10000 - 200*MMU**2 + MMU**4 - 200*MTA**2 - 2*MMU**2*MTA**2 + MTA**4))/(16000.*cmath.pi)',
(P.ta__minus__,P.e__plus__):'((-(Me*MTA*vev**2*yf1x3*yf3x1) + 50*vev**2*yf1x3*complexconjugate(yf1x3) - (Me**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. - (MTA**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. + 50*vev**2*yf3x1*complexconjugate(yf3x1) - (Me**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. - (MTA**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. - Me*MTA*vev**2*complexconjugate(yf1x3)*complexconjugate(yf3x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MTA**2 - 2*Me**2*MTA**2 + MTA**4))/(16000.*cmath.pi)',
(P.ta__minus__,P.mu__plus__):'((-(MMU*MTA*vev**2*yf2x3*yf3x2) + 50*vev**2*yf2x3*complexconjugate(yf2x3) - (MMU**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. - (MTA**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. + 50*vev**2*yf3x2*complexconjugate(yf3x2) - (MMU**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. - (MTA**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. - MMU*MTA*vev**2*complexconjugate(yf2x3)*complexconjugate(yf3x2))*cmath.sqrt(10000 - 200*MMU**2 + MMU**4 - 200*MTA**2 - 2*MMU**2*MTA**2 + MTA**4))/(16000.*cmath.pi)',
(P.ta__minus__,P.ta__plus__):'((-(MTA**2*vev**2*yf3x3**2) + 100*vev**2*yf3x3*complexconjugate(yf3x3) - 2*MTA**2*vev**2*yf3x3*complexconjugate(yf3x3) - MTA**2*vev**2*complexconjugate(yf3x3)**2)*cmath.sqrt(10000 - 400*MTA**2))/(16000.*cmath.pi)'})
# SM-like Higgs H: fermion pairs, gg/aa via effective couplings (GH/AH),
# fi pair via kq, and WW/ZZ.
Decay_H = Decay(name = 'Decay_H',
particle = P.H,
partial_widths = {(P.a,P.a):'(AH**2*MH**6)/(64.*cmath.pi*abs(MH)**3)',
(P.b,P.b__tilde__):'((-12*MB**2*yb**2 + 3*MH**2*yb**2)*cmath.sqrt(-4*MB**2*MH**2 + MH**4))/(16.*cmath.pi*abs(MH)**3)',
(P.c,P.c__tilde__):'((-12*MC**2*yc**2 + 3*MH**2*yc**2)*cmath.sqrt(-4*MC**2*MH**2 + MH**4))/(16.*cmath.pi*abs(MH)**3)',
(P.d,P.d__tilde__):'((-12*MD**2*ydo**2 + 3*MH**2*ydo**2)*cmath.sqrt(-4*MD**2*MH**2 + MH**4))/(16.*cmath.pi*abs(MH)**3)',
(P.e__minus__,P.e__plus__):'((-4*Me**2*ye**2 + MH**2*ye**2)*cmath.sqrt(-4*Me**2*MH**2 + MH**4))/(16.*cmath.pi*abs(MH)**3)',
(P.fi,P.fi):'(kq**2*vev**2*cmath.sqrt(-400*MH**2 + MH**4))/(32.*cmath.pi*abs(MH)**3)',
(P.g,P.g):'(GH**2*MH**6)/(8.*cmath.pi*abs(MH)**3)',
(P.mu__minus__,P.mu__plus__):'((MH**2*ym**2 - 4*MMU**2*ym**2)*cmath.sqrt(MH**4 - 4*MH**2*MMU**2))/(16.*cmath.pi*abs(MH)**3)',
(P.s,P.s__tilde__):'((3*MH**2*ys**2 - 12*MS**2*ys**2)*cmath.sqrt(MH**4 - 4*MH**2*MS**2))/(16.*cmath.pi*abs(MH)**3)',
(P.t,P.t__tilde__):'((3*MH**2*yt**2 - 12*MT**2*yt**2)*cmath.sqrt(MH**4 - 4*MH**2*MT**2))/(16.*cmath.pi*abs(MH)**3)',
(P.ta__minus__,P.ta__plus__):'((MH**2*ytau**2 - 4*MTA**2*ytau**2)*cmath.sqrt(MH**4 - 4*MH**2*MTA**2))/(16.*cmath.pi*abs(MH)**3)',
(P.u,P.u__tilde__):'((3*MH**2*yup**2 - 12*MU**2*yup**2)*cmath.sqrt(MH**4 - 4*MH**2*MU**2))/(16.*cmath.pi*abs(MH)**3)',
(P.W__minus__,P.W__plus__):'(((3*ee**4*vev**2)/(4.*sw**4) + (ee**4*MH**4*vev**2)/(16.*MW**4*sw**4) - (ee**4*MH**2*vev**2)/(4.*MW**2*sw**4))*cmath.sqrt(MH**4 - 4*MH**2*MW**2))/(16.*cmath.pi*abs(MH)**3)',
(P.Z,P.Z):'(((9*ee**4*vev**2)/2. + (3*ee**4*MH**4*vev**2)/(8.*MZ**4) - (3*ee**4*MH**2*vev**2)/(2.*MZ**2) + (3*cw**4*ee**4*vev**2)/(4.*sw**4) + (cw**4*ee**4*MH**4*vev**2)/(16.*MZ**4*sw**4) - (cw**4*ee**4*MH**2*vev**2)/(4.*MZ**2*sw**4) + (3*cw**2*ee**4*vev**2)/sw**2 + (cw**2*ee**4*MH**4*vev**2)/(4.*MZ**4*sw**2) - (cw**2*ee**4*MH**2*vev**2)/(MZ**2*sw**2) + (3*ee**4*sw**2*vev**2)/cw**2 + (ee**4*MH**4*sw**2*vev**2)/(4.*cw**2*MZ**4) - (ee**4*MH**2*sw**2*vev**2)/(cw**2*MZ**2) + (3*ee**4*sw**4*vev**2)/(4.*cw**4) + (ee**4*MH**4*sw**4*vev**2)/(16.*cw**4*MZ**4) - (ee**4*MH**2*sw**4*vev**2)/(4.*cw**4*MZ**2))*cmath.sqrt(MH**4 - 4*MH**2*MZ**2))/(32.*cmath.pi*abs(MH)**3)'})
# h1 scalar: only the effective gluon-pair channel (Gphi coupling).
Decay_h1 = Decay(name = 'Decay_h1',
particle = P.h1,
partial_widths = {(P.g,P.g):'(Gphi**2*MP**6)/(8.*cmath.pi*abs(MP)**3)'})
# muon: LFV mu -> fi + (e/ta) channels plus mu -> W- vm.
Decay_mu__minus__ = Decay(name = 'Decay_mu__minus__',
particle = P.mu__minus__,
partial_widths = {(P.fi,P.e__minus__):'((Me*MMU*vev**2*yf1x2*yf2x1 - 50*vev**2*yf1x2*complexconjugate(yf1x2) + (Me**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. + (MMU**2*vev**2*yf1x2*complexconjugate(yf1x2))/2. - 50*vev**2*yf2x1*complexconjugate(yf2x1) + (Me**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. + (MMU**2*vev**2*yf2x1*complexconjugate(yf2x1))/2. + Me*MMU*vev**2*complexconjugate(yf1x2)*complexconjugate(yf2x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MMU**2 - 2*Me**2*MMU**2 + MMU**4))/(32.*cmath.pi*abs(MMU)**3)',
(P.fi,P.ta__minus__):'((MMU*MTA*vev**2*yf2x3*yf3x2 - 50*vev**2*yf2x3*complexconjugate(yf2x3) + (MMU**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. + (MTA**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. - 50*vev**2*yf3x2*complexconjugate(yf3x2) + (MMU**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. + (MTA**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. + MMU*MTA*vev**2*complexconjugate(yf2x3)*complexconjugate(yf3x2))*cmath.sqrt(10000 - 200*MMU**2 + MMU**4 - 200*MTA**2 - 2*MMU**2*MTA**2 + MTA**4))/(32.*cmath.pi*abs(MMU)**3)',
(P.W__minus__,P.vm):'((MMU**2 - MW**2)*((ee**2*MMU**2)/(2.*sw**2) + (ee**2*MMU**4)/(2.*MW**2*sw**2) - (ee**2*MW**2)/sw**2))/(32.*cmath.pi*abs(MMU)**3)'})
# s quark: s -> W- + up-type quark (CKM 2nd column).
Decay_s = Decay(name = 'Decay_s',
particle = P.s,
partial_widths = {(P.W__minus__,P.c):'(((3*CKM2x2*ee**2*MC**2*complexconjugate(CKM2x2))/(2.*sw**2) + (3*CKM2x2*ee**2*MS**2*complexconjugate(CKM2x2))/(2.*sw**2) + (3*CKM2x2*ee**2*MC**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) - (3*CKM2x2*ee**2*MC**2*MS**2*complexconjugate(CKM2x2))/(MW**2*sw**2) + (3*CKM2x2*ee**2*MS**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) - (3*CKM2x2*ee**2*MW**2*complexconjugate(CKM2x2))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MS**2 + MS**4 - 2*MC**2*MW**2 - 2*MS**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MS)**3)',
(P.W__minus__,P.t):'(((3*CKM3x2*ee**2*MS**2*complexconjugate(CKM3x2))/(2.*sw**2) + (3*CKM3x2*ee**2*MT**2*complexconjugate(CKM3x2))/(2.*sw**2) + (3*CKM3x2*ee**2*MS**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) - (3*CKM3x2*ee**2*MS**2*MT**2*complexconjugate(CKM3x2))/(MW**2*sw**2) + (3*CKM3x2*ee**2*MT**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) - (3*CKM3x2*ee**2*MW**2*complexconjugate(CKM3x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MT**2 + MT**4 - 2*MS**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MS)**3)',
(P.W__minus__,P.u):'(((3*CKM1x2*ee**2*MS**2*complexconjugate(CKM1x2))/(2.*sw**2) + (3*CKM1x2*ee**2*MU**2*complexconjugate(CKM1x2))/(2.*sw**2) + (3*CKM1x2*ee**2*MS**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) - (3*CKM1x2*ee**2*MS**2*MU**2*complexconjugate(CKM1x2))/(MW**2*sw**2) + (3*CKM1x2*ee**2*MU**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) - (3*CKM1x2*ee**2*MW**2*complexconjugate(CKM1x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MU**2 + MU**4 - 2*MS**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MS)**3)'})
# top quark: t -> W+ + down-type quark (CKM 3rd row).
Decay_t = Decay(name = 'Decay_t',
particle = P.t,
partial_widths = {(P.W__plus__,P.b):'(((3*CKM3x3*ee**2*MB**2*complexconjugate(CKM3x3))/(2.*sw**2) + (3*CKM3x3*ee**2*MT**2*complexconjugate(CKM3x3))/(2.*sw**2) + (3*CKM3x3*ee**2*MB**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) - (3*CKM3x3*ee**2*MB**2*MT**2*complexconjugate(CKM3x3))/(MW**2*sw**2) + (3*CKM3x3*ee**2*MT**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) - (3*CKM3x3*ee**2*MW**2*complexconjugate(CKM3x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MT)**3)',
(P.W__plus__,P.d):'(((3*CKM3x1*ee**2*MD**2*complexconjugate(CKM3x1))/(2.*sw**2) + (3*CKM3x1*ee**2*MT**2*complexconjugate(CKM3x1))/(2.*sw**2) + (3*CKM3x1*ee**2*MD**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) - (3*CKM3x1*ee**2*MD**2*MT**2*complexconjugate(CKM3x1))/(MW**2*sw**2) + (3*CKM3x1*ee**2*MT**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) - (3*CKM3x1*ee**2*MW**2*complexconjugate(CKM3x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MT**2 + MT**4 - 2*MD**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MT)**3)',
(P.W__plus__,P.s):'(((3*CKM3x2*ee**2*MS**2*complexconjugate(CKM3x2))/(2.*sw**2) + (3*CKM3x2*ee**2*MT**2*complexconjugate(CKM3x2))/(2.*sw**2) + (3*CKM3x2*ee**2*MS**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) - (3*CKM3x2*ee**2*MS**2*MT**2*complexconjugate(CKM3x2))/(MW**2*sw**2) + (3*CKM3x2*ee**2*MT**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) - (3*CKM3x2*ee**2*MW**2*complexconjugate(CKM3x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MT**2 + MT**4 - 2*MS**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MT)**3)'})
# tau: LFV ta -> fi + (e/mu) channels plus ta -> W- vt.
Decay_ta__minus__ = Decay(name = 'Decay_ta__minus__',
particle = P.ta__minus__,
partial_widths = {(P.fi,P.e__minus__):'((Me*MTA*vev**2*yf1x3*yf3x1 - 50*vev**2*yf1x3*complexconjugate(yf1x3) + (Me**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. + (MTA**2*vev**2*yf1x3*complexconjugate(yf1x3))/2. - 50*vev**2*yf3x1*complexconjugate(yf3x1) + (Me**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. + (MTA**2*vev**2*yf3x1*complexconjugate(yf3x1))/2. + Me*MTA*vev**2*complexconjugate(yf1x3)*complexconjugate(yf3x1))*cmath.sqrt(10000 - 200*Me**2 + Me**4 - 200*MTA**2 - 2*Me**2*MTA**2 + MTA**4))/(32.*cmath.pi*abs(MTA)**3)',
(P.fi,P.mu__minus__):'((MMU*MTA*vev**2*yf2x3*yf3x2 - 50*vev**2*yf2x3*complexconjugate(yf2x3) + (MMU**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. + (MTA**2*vev**2*yf2x3*complexconjugate(yf2x3))/2. - 50*vev**2*yf3x2*complexconjugate(yf3x2) + (MMU**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. + (MTA**2*vev**2*yf3x2*complexconjugate(yf3x2))/2. + MMU*MTA*vev**2*complexconjugate(yf2x3)*complexconjugate(yf3x2))*cmath.sqrt(10000 - 200*MMU**2 + MMU**4 - 200*MTA**2 - 2*MMU**2*MTA**2 + MTA**4))/(32.*cmath.pi*abs(MTA)**3)',
(P.W__minus__,P.vt):'((MTA**2 - MW**2)*((ee**2*MTA**2)/(2.*sw**2) + (ee**2*MTA**4)/(2.*MW**2*sw**2) - (ee**2*MW**2)/sw**2))/(32.*cmath.pi*abs(MTA)**3)'})
# u quark: u -> W+ + down-type quark (CKM 1st row).
Decay_u = Decay(name = 'Decay_u',
particle = P.u,
partial_widths = {(P.W__plus__,P.b):'(((3*CKM1x3*ee**2*MB**2*complexconjugate(CKM1x3))/(2.*sw**2) + (3*CKM1x3*ee**2*MU**2*complexconjugate(CKM1x3))/(2.*sw**2) + (3*CKM1x3*ee**2*MB**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) - (3*CKM1x3*ee**2*MB**2*MU**2*complexconjugate(CKM1x3))/(MW**2*sw**2) + (3*CKM1x3*ee**2*MU**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) - (3*CKM1x3*ee**2*MW**2*complexconjugate(CKM1x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MU**2 + MU**4 - 2*MB**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MU)**3)',
(P.W__plus__,P.d):'(((3*CKM1x1*ee**2*MD**2*complexconjugate(CKM1x1))/(2.*sw**2) + (3*CKM1x1*ee**2*MU**2*complexconjugate(CKM1x1))/(2.*sw**2) + (3*CKM1x1*ee**2*MD**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) - (3*CKM1x1*ee**2*MD**2*MU**2*complexconjugate(CKM1x1))/(MW**2*sw**2) + (3*CKM1x1*ee**2*MU**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) - (3*CKM1x1*ee**2*MW**2*complexconjugate(CKM1x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MU**2 + MU**4 - 2*MD**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MU)**3)',
(P.W__plus__,P.s):'(((3*CKM1x2*ee**2*MS**2*complexconjugate(CKM1x2))/(2.*sw**2) + (3*CKM1x2*ee**2*MU**2*complexconjugate(CKM1x2))/(2.*sw**2) + (3*CKM1x2*ee**2*MS**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) - (3*CKM1x2*ee**2*MS**2*MU**2*complexconjugate(CKM1x2))/(MW**2*sw**2) + (3*CKM1x2*ee**2*MU**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) - (3*CKM1x2*ee**2*MW**2*complexconjugate(CKM1x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MU**2 + MU**4 - 2*MS**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MU)**3)'})
# W+ boson: all quark-pair (CKM-weighted) and lepton-neutrino channels.
Decay_W__plus__ = Decay(name = 'Decay_W__plus__',
particle = P.W__plus__,
partial_widths = {(P.c,P.b__tilde__):'(((-3*CKM2x3*ee**2*MB**2*complexconjugate(CKM2x3))/(2.*sw**2) - (3*CKM2x3*ee**2*MC**2*complexconjugate(CKM2x3))/(2.*sw**2) - (3*CKM2x3*ee**2*MB**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) + (3*CKM2x3*ee**2*MB**2*MC**2*complexconjugate(CKM2x3))/(MW**2*sw**2) - (3*CKM2x3*ee**2*MC**4*complexconjugate(CKM2x3))/(2.*MW**2*sw**2) + (3*CKM2x3*ee**2*MW**2*complexconjugate(CKM2x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MC**2 + MC**4 - 2*MB**2*MW**2 - 2*MC**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.c,P.d__tilde__):'(((-3*CKM2x1*ee**2*MC**2*complexconjugate(CKM2x1))/(2.*sw**2) - (3*CKM2x1*ee**2*MD**2*complexconjugate(CKM2x1))/(2.*sw**2) - (3*CKM2x1*ee**2*MC**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) + (3*CKM2x1*ee**2*MC**2*MD**2*complexconjugate(CKM2x1))/(MW**2*sw**2) - (3*CKM2x1*ee**2*MD**4*complexconjugate(CKM2x1))/(2.*MW**2*sw**2) + (3*CKM2x1*ee**2*MW**2*complexconjugate(CKM2x1))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MD**2 + MD**4 - 2*MC**2*MW**2 - 2*MD**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.c,P.s__tilde__):'(((-3*CKM2x2*ee**2*MC**2*complexconjugate(CKM2x2))/(2.*sw**2) - (3*CKM2x2*ee**2*MS**2*complexconjugate(CKM2x2))/(2.*sw**2) - (3*CKM2x2*ee**2*MC**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) + (3*CKM2x2*ee**2*MC**2*MS**2*complexconjugate(CKM2x2))/(MW**2*sw**2) - (3*CKM2x2*ee**2*MS**4*complexconjugate(CKM2x2))/(2.*MW**2*sw**2) + (3*CKM2x2*ee**2*MW**2*complexconjugate(CKM2x2))/sw**2)*cmath.sqrt(MC**4 - 2*MC**2*MS**2 + MS**4 - 2*MC**2*MW**2 - 2*MS**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.t,P.b__tilde__):'(((-3*CKM3x3*ee**2*MB**2*complexconjugate(CKM3x3))/(2.*sw**2) - (3*CKM3x3*ee**2*MT**2*complexconjugate(CKM3x3))/(2.*sw**2) - (3*CKM3x3*ee**2*MB**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) + (3*CKM3x3*ee**2*MB**2*MT**2*complexconjugate(CKM3x3))/(MW**2*sw**2) - (3*CKM3x3*ee**2*MT**4*complexconjugate(CKM3x3))/(2.*MW**2*sw**2) + (3*CKM3x3*ee**2*MW**2*complexconjugate(CKM3x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.t,P.d__tilde__):'(((-3*CKM3x1*ee**2*MD**2*complexconjugate(CKM3x1))/(2.*sw**2) - (3*CKM3x1*ee**2*MT**2*complexconjugate(CKM3x1))/(2.*sw**2) - (3*CKM3x1*ee**2*MD**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) + (3*CKM3x1*ee**2*MD**2*MT**2*complexconjugate(CKM3x1))/(MW**2*sw**2) - (3*CKM3x1*ee**2*MT**4*complexconjugate(CKM3x1))/(2.*MW**2*sw**2) + (3*CKM3x1*ee**2*MW**2*complexconjugate(CKM3x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MT**2 + MT**4 - 2*MD**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.t,P.s__tilde__):'(((-3*CKM3x2*ee**2*MS**2*complexconjugate(CKM3x2))/(2.*sw**2) - (3*CKM3x2*ee**2*MT**2*complexconjugate(CKM3x2))/(2.*sw**2) - (3*CKM3x2*ee**2*MS**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) + (3*CKM3x2*ee**2*MS**2*MT**2*complexconjugate(CKM3x2))/(MW**2*sw**2) - (3*CKM3x2*ee**2*MT**4*complexconjugate(CKM3x2))/(2.*MW**2*sw**2) + (3*CKM3x2*ee**2*MW**2*complexconjugate(CKM3x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MT**2 + MT**4 - 2*MS**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.u,P.b__tilde__):'(((-3*CKM1x3*ee**2*MB**2*complexconjugate(CKM1x3))/(2.*sw**2) - (3*CKM1x3*ee**2*MU**2*complexconjugate(CKM1x3))/(2.*sw**2) - (3*CKM1x3*ee**2*MB**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) + (3*CKM1x3*ee**2*MB**2*MU**2*complexconjugate(CKM1x3))/(MW**2*sw**2) - (3*CKM1x3*ee**2*MU**4*complexconjugate(CKM1x3))/(2.*MW**2*sw**2) + (3*CKM1x3*ee**2*MW**2*complexconjugate(CKM1x3))/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MU**2 + MU**4 - 2*MB**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.u,P.d__tilde__):'(((-3*CKM1x1*ee**2*MD**2*complexconjugate(CKM1x1))/(2.*sw**2) - (3*CKM1x1*ee**2*MU**2*complexconjugate(CKM1x1))/(2.*sw**2) - (3*CKM1x1*ee**2*MD**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) + (3*CKM1x1*ee**2*MD**2*MU**2*complexconjugate(CKM1x1))/(MW**2*sw**2) - (3*CKM1x1*ee**2*MU**4*complexconjugate(CKM1x1))/(2.*MW**2*sw**2) + (3*CKM1x1*ee**2*MW**2*complexconjugate(CKM1x1))/sw**2)*cmath.sqrt(MD**4 - 2*MD**2*MU**2 + MU**4 - 2*MD**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.u,P.s__tilde__):'(((-3*CKM1x2*ee**2*MS**2*complexconjugate(CKM1x2))/(2.*sw**2) - (3*CKM1x2*ee**2*MU**2*complexconjugate(CKM1x2))/(2.*sw**2) - (3*CKM1x2*ee**2*MS**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) + (3*CKM1x2*ee**2*MS**2*MU**2*complexconjugate(CKM1x2))/(MW**2*sw**2) - (3*CKM1x2*ee**2*MU**4*complexconjugate(CKM1x2))/(2.*MW**2*sw**2) + (3*CKM1x2*ee**2*MW**2*complexconjugate(CKM1x2))/sw**2)*cmath.sqrt(MS**4 - 2*MS**2*MU**2 + MU**4 - 2*MS**2*MW**2 - 2*MU**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.ve,P.e__plus__):'((-Me**2 + MW**2)*(-(ee**2*Me**2)/(2.*sw**2) - (ee**2*Me**4)/(2.*MW**2*sw**2) + (ee**2*MW**2)/sw**2))/(48.*cmath.pi*abs(MW)**3)',
(P.vm,P.mu__plus__):'((-MMU**2 + MW**2)*(-(ee**2*MMU**2)/(2.*sw**2) - (ee**2*MMU**4)/(2.*MW**2*sw**2) + (ee**2*MW**2)/sw**2))/(48.*cmath.pi*abs(MW)**3)',
(P.vt,P.ta__plus__):'((-MTA**2 + MW**2)*(-(ee**2*MTA**2)/(2.*sw**2) - (ee**2*MTA**4)/(2.*MW**2*sw**2) + (ee**2*MW**2)/sw**2))/(48.*cmath.pi*abs(MW)**3)'})
# Z boson: all fermion-antifermion channels plus Z -> W+ W-.
Decay_Z = Decay(name = 'Decay_Z',
particle = P.Z,
partial_widths = {(P.b,P.b__tilde__):'((-7*ee**2*MB**2 + ee**2*MZ**2 - (3*cw**2*ee**2*MB**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) - (17*ee**2*MB**2*sw**2)/(6.*cw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MB**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.c,P.c__tilde__):'((-11*ee**2*MC**2 - ee**2*MZ**2 - (3*cw**2*ee**2*MC**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MC**2*sw**2)/(6.*cw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MC**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.d,P.d__tilde__):'((-7*ee**2*MD**2 + ee**2*MZ**2 - (3*cw**2*ee**2*MD**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) - (17*ee**2*MD**2*sw**2)/(6.*cw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MD**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.e__minus__,P.e__plus__):'((-5*ee**2*Me**2 - ee**2*MZ**2 - (cw**2*ee**2*Me**2)/(2.*sw**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*Me**2*sw**2)/(2.*cw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2))*cmath.sqrt(-4*Me**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.mu__minus__,P.mu__plus__):'((-5*ee**2*MMU**2 - ee**2*MZ**2 - (cw**2*ee**2*MMU**2)/(2.*sw**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MMU**2*sw**2)/(2.*cw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2))*cmath.sqrt(-4*MMU**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.s,P.s__tilde__):'((-7*ee**2*MS**2 + ee**2*MZ**2 - (3*cw**2*ee**2*MS**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) - (17*ee**2*MS**2*sw**2)/(6.*cw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MS**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.t,P.t__tilde__):'((-11*ee**2*MT**2 - ee**2*MZ**2 - (3*cw**2*ee**2*MT**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MT**2*sw**2)/(6.*cw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MT**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.ta__minus__,P.ta__plus__):'((-5*ee**2*MTA**2 - ee**2*MZ**2 - (cw**2*ee**2*MTA**2)/(2.*sw**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MTA**2*sw**2)/(2.*cw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2))*cmath.sqrt(-4*MTA**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.u,P.u__tilde__):'((-11*ee**2*MU**2 - ee**2*MZ**2 - (3*cw**2*ee**2*MU**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MU**2*sw**2)/(6.*cw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MU**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.ve,P.ve__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vm,P.vm__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vt,P.vt__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.W__minus__,P.W__plus__):'(((-12*cw**2*ee**2*MW**2)/sw**2 - (17*cw**2*ee**2*MZ**2)/sw**2 + (4*cw**2*ee**2*MZ**4)/(MW**2*sw**2) + (cw**2*ee**2*MZ**6)/(4.*MW**4*sw**2))*cmath.sqrt(-4*MW**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)'})
| 241.867188
| 701
| 0.547078
| 5,805
| 30,959
| 2.841171
| 0.023256
| 0.044928
| 0.048748
| 0.043049
| 0.918753
| 0.893531
| 0.874492
| 0.856303
| 0.829079
| 0.823925
| 0
| 0.13917
| 0.134048
| 30,959
| 127
| 702
| 243.771654
| 0.476034
| 0.005039
| 0
| 0
| 0
| 0.716981
| 0.815118
| 0.687707
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
0a217410b98cfc852d24d4bfb32c93bb3ad5f318
| 9,310
|
py
|
Python
|
app_files/main/draw.py
|
ammaralt/fyp-project
|
1edd9cd01762c392b338c486b5aba932130f2a77
|
[
"Apache-2.0"
] | 3
|
2021-12-27T06:52:07.000Z
|
2022-02-19T20:58:48.000Z
|
app_files/main/draw.py
|
ammaralt/fyp-project
|
1edd9cd01762c392b338c486b5aba932130f2a77
|
[
"Apache-2.0"
] | 1
|
2022-03-03T13:02:40.000Z
|
2022-03-03T13:02:40.000Z
|
app_files/main/draw.py
|
ammaralt/fyp-project
|
1edd9cd01762c392b338c486b5aba932130f2a77
|
[
"Apache-2.0"
] | 2
|
2022-03-04T04:23:10.000Z
|
2022-03-07T19:57:19.000Z
|
import cv2 as cv
def draw_landmarks(image, landmark_point):
    """Draw a 21-point hand skeleton onto *image*.

    Each bone is drawn twice (thick black underlay, thin white overlay) so the
    line stays visible on any background; each keypoint is a white filled
    circle with a black outline, drawn larger for the four fingertips and the
    thumb tip.

    Args:
        image: BGR image (cv2/numpy array), modified in place.
        landmark_point: sequence of (x, y) pixel coordinates; connections are
            only drawn when the sequence is non-empty.

    Returns:
        The same *image* object, after drawing.
    """
    # Bone list in the original drawing order: thumb, index, middle, ring,
    # little finger, then the palm outline.
    connections = [
        (2, 3), (3, 4),                     # thumb
        (5, 6), (6, 7), (7, 8),             # index finger
        (9, 10), (10, 11), (11, 12),        # middle finger
        (13, 14), (14, 15), (15, 16),       # ring finger
        (17, 18), (18, 19), (19, 20),       # little finger
        (0, 1), (1, 2), (2, 5), (5, 9), (9, 13), (13, 17), (17, 0),  # palm
    ]
    # Thumb tip and fingertips get a bigger marker (radius 8 vs 5).
    fingertips = {4, 8, 12, 16, 20}

    if len(landmark_point) > 0:
        for start, end in connections:
            p1 = tuple(landmark_point[start])
            p2 = tuple(landmark_point[end])
            cv.line(image, p1, p2, (0, 0, 0), 6)
            cv.line(image, p1, p2, (255, 255, 255), 2)

        for index, landmark in enumerate(landmark_point):
            # Original code only handled indices 0..20 explicitly; keep that.
            if index > 20:
                continue
            radius = 8 if index in fingertips else 5
            center = (landmark[0], landmark[1])
            cv.circle(image, center, radius, (255, 255, 255), -1)
            cv.circle(image, center, radius, (0, 0, 0), 1)
    return image
def draw_info_text(image, handedness, hand_sign_text):
    """Overlay the classifier's label text near the top of *image*.

    Shows the hand-sign prediction when one is available; otherwise falls back
    to the handedness label. Returns the (mutated) image.
    """
    # Handedness label is read unconditionally, matching the original flow.
    fallback_label = handedness.classification[0].label[0:]
    info_text = (
        "Predicted Text" + ':' + hand_sign_text
        if hand_sign_text != ""
        else fallback_label
    )
    cv.putText(image, info_text, (10, 60), cv.FONT_HERSHEY_SIMPLEX, 1.0,
               (196, 255, 255), 2, cv.LINE_AA)
    return image
| 47.258883
| 104
| 0.497852
| 1,270
| 9,310
| 3.569291
| 0.044882
| 0.249504
| 0.333554
| 0.148246
| 0.930068
| 0.918376
| 0.918376
| 0.918376
| 0.918376
| 0.918156
| 0
| 0.140894
| 0.319979
| 9,310
| 197
| 105
| 47.258883
| 0.575107
| 0
| 0
| 0.832402
| 0
| 0
| 0.001611
| 0
| 0.005587
| 0
| 0
| 0
| 0
| 1
| 0.011173
| false
| 0
| 0.005587
| 0
| 0.027933
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0a2541a673a0eb06c7fe04fcdc66de461976f9bb
| 75
|
py
|
Python
|
tests/testapp/scripts/invalid_import_script.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | 1
|
2019-04-15T10:28:42.000Z
|
2019-04-15T10:28:42.000Z
|
tests/testapp/scripts/invalid_import_script.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | null | null | null |
tests/testapp/scripts/invalid_import_script.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import invalidpackage # NOQA
def run():
    """Entry point; intentionally does nothing.

    NOTE(review): the module-level import above pulls in `invalidpackage`,
    which presumably does not exist — this looks like a fixture for testing
    import-error handling; confirm against the test suite.
    """
    pass
| 10.714286
| 28
| 0.586667
| 9
| 75
| 4.888889
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.24
| 75
| 6
| 29
| 12.5
| 0.754386
| 0.346667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
0a36a397bf11280d75fd71c3df712a63db55b62b
| 119
|
py
|
Python
|
modules/junk_test.py
|
mwanakijiji/rrlfe2
|
0637b348b8d3e54ff34c56caa8b4c6fdac1c699e
|
[
"MIT"
] | null | null | null |
modules/junk_test.py
|
mwanakijiji/rrlfe2
|
0637b348b8d3e54ff34c56caa8b4c6fdac1c699e
|
[
"MIT"
] | 18
|
2022-01-13T14:43:57.000Z
|
2022-03-24T12:52:41.000Z
|
modules/junk_test.py
|
mwanakijiji/rrlyrae_metallicity
|
1aa867eb9c96dba433271207efdf758cc7849360
|
[
"MIT"
] | null | null | null |
import astropy
#from modules2 import *
#from modules2 import create_spec_realizations
def junk_fcn():
    """Placeholder function; always returns the literal string "1"."""
    return "1"
| 14.875
| 46
| 0.764706
| 16
| 119
| 5.5
| 0.75
| 0.272727
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.168067
| 119
| 7
| 47
| 17
| 0.858586
| 0.563025
| 0
| 0
| 0
| 0
| 0.02
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
0a6f0f76784a7f2e00ca4a6af4cf86a27566ba58
| 2,310
|
py
|
Python
|
common/http_util.py
|
mozhumz/machine_learning_py
|
880f6778ac16b0a16a80b31972a35304caa91dc1
|
[
"MulanPSL-1.0"
] | null | null | null |
common/http_util.py
|
mozhumz/machine_learning_py
|
880f6778ac16b0a16a80b31972a35304caa91dc1
|
[
"MulanPSL-1.0"
] | null | null | null |
common/http_util.py
|
mozhumz/machine_learning_py
|
880f6778ac16b0a16a80b31972a35304caa91dc1
|
[
"MulanPSL-1.0"
] | null | null | null |
from http.cookies import SimpleCookie
def parse_cookie(cookie):
    """Parse a raw ``Cookie`` header string into a ``{name: value}`` dict.

    Args:
        cookie: the cookie header value, e.g. ``"a=1; b=2"``.

    Returns:
        Dict mapping cookie names to their (decoded) values; empty dict for
        an empty or unparseable input, per SimpleCookie semantics.
    """
    jar = SimpleCookie(cookie)
    # Iterate the morsels directly; the original looped over .items() but
    # ignored the key and re-read it from the morsel.
    return {morsel.key: morsel.value for morsel in jar.values()}
if __name__ == '__main__':
    # Manual smoke test: parse a captured (zhihu.com) cookie header and print
    # the quoted d_c0 device-id value.
    # NOTE(review): this is a real captured session cookie committed to the
    # repo — consider scrubbing it.
    cookie = '_zap=da65d76c-fbcb-4c76-b731-1bbff7fb04ec; d_c0="AJCaOogwaxGPTjuq8M-UoT-tMeRndcdBNWM=|1592031146"; _ga=GA1.2.2020371219.1592031151; _xsrf=FC5ygIfy9Id687u5zuQ2rhDZD26rAfS2; __snaker__id=YqPDWFm68rU5FPKm; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=n6EMpKJWJsFFQERRAUMqyz%2FmzH62puZU; gdxidpyhxdE=bCfVQYiVxphcSI1VuWp82ViBonGYx%2B94%2Bc59UNOInO1gyEZtm5VejBMyUUSuk7whM5XUv7ESISl6dpSEd2hv%5CsoERW3E4Xm%2FQ2dLV9iEvwJP1ITidZXEe%5CYTyX67LjVowQlI4k%5CLs9fh%2Bvzxgh9yfSgEoSAkGJmjPz89r6V3cmuOKjIQ%3A1633443302320; YD00517437729195%3AWM_NI=Nv46%2F8vUXvDaBROGu%2BpcxIzpu%2B6QFUlVro4L7PuPWimUEgml5RJ%2BMJKE48N0qX6J6IbAwUaTzO4zsqYix5x8oRkI2VVLuYXeMxss5Ic5I2RMrDr7%2FKvASeb0ILD2gQyheG8%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eed1aa34b6b782d4b5748d868fa7c14f929b8b84f87c85868b91c453a1b09fd6cf2af0fea7c3b92a8eaafcd7ae34b68b9897c76b828e81a8f821b4bdb8aaf97cb7f5fab2c55fb08abe90b748f590bbb1d663af8b8b88f068f78e86ccd042899bfcd9cd3cb388faabb543a29fb9b6e460ba91fc9bf27b9bb7b8d6fb34ad8ea5aab15e9694bbbadb6f8fa8bd94b843a1eb87d3d147b29cbed8ae41b69f9d86e13c8b948cd3e65bbbb8aba8bb37e2a3; z_c0="2|1:0|10:1633442413|4:z_c0|92:Mi4xT3pVSUJBQUFBQUFBa0pvNmlEQnJFU1lBQUFCZ0FsVk5iYWhKWWdBZDg4OG1RT2t6Q1NNX1N4TU5zVUtFcHd1Q0F3|5fcf3e2ca5ef5c2ab208793aa1a0c1fcf44564aff46faea55c815141169af26e"; tst=r; q_c1=be1793119d154b20846b05d3dad2418f|1636173236000|1636173236000; __utmc=51854390; __utmz=51854390.1636173236.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=51854390.100--|2=registration_date=20170201=1^3=entry_date=20170201=1; SESSIONID=iszLu5J1gez7KZsprLPb2iITVrMF816iqGlupSPDSg8; JOID=UVoXBklMBnK8rgJVfEpGLPNz_dJsJHUP99o2H0oqYgiM4lknDPQc9NuuA1J86RYP8NUH8ljHVZGZIRi_BLLKpbc=; osd=UlERB0lPDXS9rgFeektGL_h1_NJvL3MO99k9GUsqYQOK41kkB_Id9NilBVN86h0J8dUE-V7GVZKSJxm_B7nMpLc=; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1636173082,1636173200,1636181886; NOT_UNREGISTER_WAITING=1; __utma=51854390.2020371219.1592031151.1636173610.1636184011.4; __utmb=51854390.0.10.1636184011; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1636184127; KLBRSID=d1f07ca9b929274b65d830a00cbd719a|1636184217|1636173080'
    print('"%s"'%parse_cookie(cookie).get('d_c0'))
| 256.666667
| 2,087
| 0.898268
| 177
| 2,310
| 11.435028
| 0.762712
| 0.029644
| 0.016798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.349199
| 0.02684
| 2,310
| 9
| 2,088
| 256.666667
| 0.551157
| 0
| 0
| 0
| 0
| 0.142857
| 0.903505
| 0.868888
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0.142857
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a709c9c302fb4bac026340e77977bab3b9c4204
| 61
|
py
|
Python
|
wikicodename/__init__.py
|
bartlomiej-zdrojewski/wikicodename
|
256483479e9124323c1e14db9fdb4ed23117a2fc
|
[
"MIT"
] | null | null | null |
wikicodename/__init__.py
|
bartlomiej-zdrojewski/wikicodename
|
256483479e9124323c1e14db9fdb4ed23117a2fc
|
[
"MIT"
] | null | null | null |
wikicodename/__init__.py
|
bartlomiej-zdrojewski/wikicodename
|
256483479e9124323c1e14db9fdb4ed23117a2fc
|
[
"MIT"
] | null | null | null |
from .wiki_data import Cache
from .wiki_data import WikiData
| 20.333333
| 31
| 0.836066
| 10
| 61
| 4.9
| 0.6
| 0.326531
| 0.489796
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 61
| 2
| 32
| 30.5
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
0a7ab96b2a1419e3fdbf053cddc1cb763c486286
| 130
|
py
|
Python
|
env/lib/python3.6/site-packages/import_export/signals.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 132
|
2015-01-06T21:43:42.000Z
|
2021-04-05T10:01:49.000Z
|
env/lib/python3.6/site-packages/import_export/signals.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 87
|
2015-01-04T08:24:19.000Z
|
2017-05-08T18:09:45.000Z
|
env/lib/python3.6/site-packages/import_export/signals.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 95
|
2015-01-01T22:30:15.000Z
|
2021-06-25T14:54:16.000Z
|
from django.dispatch import Signal
# Signals fired by django-import-export after an export / import completes;
# receivers get the model class via the "model" argument.
# NOTE(review): `providing_args` is purely documentary and was deprecated in
# Django 3.0 (removed in 4.0) — confirm the Django version this vendored copy
# targets before modernizing.
post_export = Signal(providing_args=["model"])
post_import = Signal(providing_args=["model"])
| 26
| 46
| 0.784615
| 17
| 130
| 5.764706
| 0.588235
| 0.244898
| 0.387755
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084615
| 130
| 4
| 47
| 32.5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0a9a967880c9255e71296c5c383c13d8b9cdab5c
| 107
|
py
|
Python
|
rpython/jit/backend/x86/test/test_zrpy_gc_boehm.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/backend/x86/test/test_zrpy_gc_boehm.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/backend/x86/test/test_zrpy_gc_boehm.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from rpython.jit.backend.llsupport.test.zrpy_gc_boehm_test import compile_boehm_test as test_compile_boehm
| 53.5
| 106
| 0.897196
| 18
| 107
| 4.944444
| 0.666667
| 0.202247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056075
| 107
| 1
| 107
| 107
| 0.881188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0a9a9efe5a790eb96299aec21b2331b2cb80da34
| 82,448
|
py
|
Python
|
routeTracker/UI.py
|
bt530/routeTracker
|
8d546836e42293ec8444da201555378dbe85a820
|
[
"MIT"
] | 1
|
2021-06-26T08:07:33.000Z
|
2021-06-26T08:07:33.000Z
|
routeTracker/UI.py
|
bt530/routeTracker
|
8d546836e42293ec8444da201555378dbe85a820
|
[
"MIT"
] | 1
|
2021-05-27T00:58:59.000Z
|
2021-05-27T00:58:59.000Z
|
routeTracker/UI.py
|
bt530/routeTracker
|
8d546836e42293ec8444da201555378dbe85a820
|
[
"MIT"
] | 1
|
2021-05-19T08:37:59.000Z
|
2021-05-19T08:37:59.000Z
|
import tkinter as tk
import ctypes
import time
import math
import pickle
import winsound
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter import messagebox
import os
import pyperclip
import webbrowser
import traceback
import datetime
import sys
# Resolve the directory bundled resource files are loaded from, so the app
# works both as a PyInstaller one-file executable and as a plain script.
if getattr(sys, 'frozen', False):  # running as a PyInstaller-compiled binary
    running_dir = sys._MEIPASS + "/files/"  # PyInstaller's temp extraction dir
else:
    running_dir = "./"  # running under a normal Python interpreter
ICON = running_dir + "carrier.ico"  # window icon path
class POINT(ctypes.Structure):
    """ctypes mirror of the Win32 ``POINT`` struct used by ``GetCursorPos``.

    Win32 declares POINT's members as signed LONG; the previous ``c_ulong``
    fields would wrap negative coordinates (possible on multi-monitor
    desktops where a screen sits left of / above the primary) into huge
    positive values.
    """
    _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
def mousePosition():
    """Return the current cursor position as an ``(x, y)`` int pair.

    Windows-only: queries ``user32.GetCursorPos`` via ctypes.
    """
    cursor = POINT()
    ctypes.windll.user32.GetCursorPos(ctypes.byref(cursor))
    return int(cursor.x), int(cursor.y)
class UserInterface():
    def __init__(self, reader, debug=False):
        """Initialise tracker state and load persisted settings.

        Args:
            reader: log-reader object; must expose ``folderLocation``,
                ``updateLog()``, ``resetValues()`` and the system/fuel
                attributes used elsewhere in this class.
            debug: stored on ``self.debug``; not otherwise read here.
        """
        # Screen size is used to centre the overlay window by default.
        user32 = ctypes.windll.user32
        width, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
        # Default persisted settings; also used to back-fill keys missing
        # from an older trackerData.txt.
        dataTemplate = {'window position': [width / 2 - 250, height / 4],
                        'route positions': {},
                        'showType': 'show',
                        'topmost': 1,
                        'alarm': True,
                        'logLocation': '',
                        'shipCargo': 0,
                        'carrierCargo': 0,
                        'more': False,
                        'jumpStart': '00:00',
                        'jumpEnd': '23:59'
                        }
        self.exiting = False          # set True to make mainLoop tear down
        self.debug = debug
        # self.logReader=reader
        self.maxCountdown = 60 * 21   # jump cooldown in seconds (21 min)
        self.logCheck = 5             # seconds between log re-reads
        self.logReader = reader
        self.scroll = 0               # route-list scroll offset (rows)
        self.dragOffset = [0, 0]      # cursor offset while dragging window
        self.scrolling = False
        self.stopLocations = []       # route indices that are waypoints
        self.pristineRings = []       # route indices flagged 'Pristine'
        self.countdown = self.maxCountdown
        self.countdownStart = time.time()
        self.logStart = 0             # timestamp of last log check
        self.currentFileDataKeys = {} # CSV header name -> column index
        self.currentFileData = [['unknown']]  # parsed route CSV rows
        self.system = None
        self.nextSystem = 'unknown'
        self.currentFile = None       # path of the loaded route CSV
        self.position = 0             # current index into the route
        self.dragging = False
        self.draggingPos = [width / 2 - 250, height / 4]
        self.hovering = False
        self.scrollTop = [0, 0]       # scrollbar hitbox corners (set later)
        self.scrollBottom = [0, 0]
        # Load persisted settings, creating the file from the template on
        # first run.
        try:
            with open("trackerData.txt", "rb") as f:
                self.data = pickle.load(f)
        except FileNotFoundError:
            self.data = dataTemplate
            with open("trackerData.txt", "wb") as f:
                pickle.dump(self.data, f)
        # Back-fill any settings keys added since the file was written.
        added = False
        dataKeys = list(self.data.keys())
        for i in list(dataTemplate.keys()):
            if i not in dataKeys:
                self.data[i] = dataTemplate[i]
                added = True
        if added:
            with open("trackerData.txt", "wb") as f:
                pickle.dump(self.data, f)
        # Re-open the last route file without prompting.
        if "current file" in list(self.data.keys()):
            self.currentFile = self.data["current file"]
            self.openFile(dialogue=False)
        if self.data['logLocation'] != '':
            self.logReader.folderLocation = self.data['logLocation']
        self.createWindow()
    def mainLoop(self):
        """Run the blocking UI/event loop until ``self.exiting`` is set.

        Each iteration: polls the clipboard, re-reads the game log every
        ``self.logCheck`` seconds to detect a jump (advancing the route
        position and copying the next system to the clipboard), then services
        window dragging / scrolling / periodic redraws.
        """
        timeLoop = time.time()  # timestamp of the last periodic redraw
        while True:
            time.sleep(0.01)  # ~100 Hz polling; keeps CPU usage low
            try:
                # Probe the clipboard; raises PyperclipWindowsException when
                # the clipboard is locked by another process (handled below).
                pyperclip.paste()
                if self.exiting:
                    # Orderly teardown of all windows, then leave the loop.
                    self.saveData()
                    self.window.destroy()
                    self.root.destroy()
                    try:
                        self.settingsWindow.destroy()
                    except AttributeError:
                        pass  # settings window was never opened
                    except tk._tkinter.TclError:
                        pass  # settings window already destroyed
                    break
                # self.menu.update()
                currentTime = time.time()
                # Periodic log check (NOTE: `!= None` kept as-is; `is not
                # None` would be the idiomatic spelling).
                if currentTime - self.logStart > self.logCheck and self.currentFileData != None:
                    self.logStart = currentTime
                    self.logReader.updateLog()
                    # print(self.logReader.oldSystem,self.logReader.currentSystem)
                    if self.logReader.oldSystem != self.logReader.currentSystem:
                        # A jump happened: find the current system in the
                        # route and advance position / next-system state.
                        # print("Jumped to "+self.logReader.currentSystem)
                        self.nextSystem = 'unknown'
                        for i in range(self.position, len(self.currentFileData) - 1):
                            ##print(i)
                            ##print(ui.currentFileData[i])
                            if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.logReader.currentSystem:
                                # print('copied ' + self.nextSystem + ' to clipboard')
                                # A doubled row marks a waypoint stop; skip
                                # past the duplicate.
                                if self.currentFileData[i + 1][self.currentFileDataKeys['System Name']] == \
                                        self.currentFileData[i][self.currentFileDataKeys['System Name']]:
                                    self.position = i + 1
                                    # print('double')
                                else:
                                    self.position = i
                                self.nextSystem = self.currentFileData[self.position + 1][
                                    self.currentFileDataKeys['System Name']]
                                # Put the next destination on the clipboard
                                # for pasting into the game's galaxy map.
                                pyperclip.copy(self.nextSystem)
                                self.data['route positions'][self.currentFile] = self.position
                                self.saveData()
                                # try:
                                self.clear()
                                """
                                except Exception as e:
                                    #print(e)"""
                                break
                # try:
                self.root.update()
                x, y = mousePosition()
                # Redraw once after the cursor leaves a hover highlight.
                if self.hovering:
                    self.clear()
                    self.hovering = False
                if self.dragging:
                    # Window follows the cursor, offset by the grab point.
                    self.data['window position'] = [x - self.dragOffset[0], y - self.dragOffset[1]]
                    self.clear()
                elif self.scrolling and self.scrollLength < len(self.currentFileData):
                    # Map cursor Y within the scrollbar track to a route
                    # offset relative to the current position.
                    proportion = (y - self.barCentre - self.scrollTop[1]) / self.scrollHeight
                    self.scroll = round(proportion * len(self.currentFileData) - self.position)
                    self.limitScroll()
                    self.clear()
                elif currentTime - timeLoop > 1:
                    # Idle refresh roughly once per second (keeps the jump
                    # countdown text current).
                    self.clear()
                    timeLoop = currentTime
                """
                if self.data['topmost'] == 0:
                    if not self.window.focus_displayof():
                        if topSet != 0:
                            self.window.attributes('-topmost', 0)
                            topSet=0
                    elif topSet != 1:
                        self.window.attributes('-topmost', 1)
                        topSet=1
                        ##print(topSet)
                """
                """
                except Exception as e:
                    if e == SystemExit:
                        break
                    else:
                        self.exiting=True
                        #print(e)"""
                try:
                    self.settingsWindow.update()
                except AttributeError:
                    pass  # settings window not open
                except tk._tkinter.TclError:
                    pass  # settings window already destroyed
            except pyperclip.PyperclipWindowsException:
                # Clipboard temporarily locked by another app; back off.
                time.sleep(2)
    def openFile(self, dialogue=True):
        """Load a route CSV into ``self.currentFileData`` and related state.

        Args:
            dialogue: when True, prompt the user with a file-open dialog and
                persist the choice; when False, reuse ``self.currentFile``
                (used at startup to restore the last session).
        """
        self.scroll = 0
        if dialogue:
            self.currentFile = askopenfilename()
            self.data["current file"] = self.currentFile
        if self.currentFile != '':
            # print(self.currentFile)
            # print(self.data)
            # Restore the saved progress for this route, or start at 0.
            if self.currentFile in list(self.data['route positions'].keys()):
                self.position = self.data['route positions'][self.currentFile]
            else:
                self.position = 0
                self.data['route positions'][self.currentFile] = self.position
                self.saveData()
            try:
                # Ad-hoc CSV parse: strip all double quotes, split rows on
                # newlines, then columns on commas.
                # NOTE(review): this breaks on quoted fields containing
                # commas — the stdlib csv module would be safer.
                with open(self.currentFile, 'r') as f:
                    self.currentFileData = f.read()
                self.currentFileData = "".join(self.currentFileData.split("\""))
                self.currentFileData = self.currentFileData.split("\n")
                self.currentFileData = [i.split(",") for i in self.currentFileData]
                ##print(currentFileData)
                # First row is the header: build column-name -> index map,
                # then drop it from the data.
                self.currentFileDataKeys = {}
                for i in range(len(self.currentFileData[0])):
                    self.currentFileDataKeys[self.currentFileData[0][i]] = i
                del self.currentFileData[0]
                # A trailing newline parses as [''] — remove it.
                if [''] in self.currentFileData:
                    self.currentFileData.remove([''])
                # Waypoints are encoded as consecutive duplicate system
                # rows; pristine-ring systems are flagged in their column.
                self.stopLocations = []
                self.pristineRings=[]
                for i in range(len(self.currentFileData) - 1):
                    if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.currentFileData[i + 1][
                            self.currentFileDataKeys['System Name']]:
                        self.stopLocations.append(i)
                    if self.currentFileData[i][self.currentFileDataKeys['Pristine']] == 'Yes':
                        self.pristineRings.append(i)
                    ##print(self.currentFileData[i])
                ##print(self.stopLocations)
            except FileNotFoundError as e:
                messagebox.showerror("Import Error", e)
        # Rebuild the overlay window when it is in its visible mode.
        if self.data['showType'] == 'show':
            self.logReader.resetValues()
            self.logStart = 0
            self.createWindow()
def saveData(self, values=None):
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
# overlay functions
    def clear(self):
        """Destroy and fully redraw the overlay's top status bar.

        Draws the current system, the next route system, the jump-cooldown
        timer, and the menu/file/settings/pin/close glyph buttons. System
        names render green when they match the clipboard (i.e. already
        copied), orange otherwise. Also draws the dashboard when the
        'more' panel is expanded.
        """
        # all to change with new UI
        try:
            self.canvas.destroy()
        except:  # NOTE(review): bare except — canvas may simply not exist yet
            pass
        clip = pyperclip.paste()
        x, y = self.data['window position'][0], self.data['window position'][1]
        # Pink is presumably the window's transparency key colour — confirm
        # against createWindow (not visible here).
        self.canvas = tk.Canvas(self.window, bg="pink", bd=0, highlightthickness=0, relief='ridge')
        self.canvas.pack(fill="both", expand=True)
        self.canvas.create_rectangle(x, y, x + 520, y + 30, fill='black')
        # Current system: green if it matches the clipboard.
        if self.logReader.currentSystem == clip:
            self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
                                    fill='green', anchor='nw')
        else:
            self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
                                    fill='orange', anchor='nw')
        # Next-system cell (black rectangle masks any overflow of the
        # current-system text).
        self.canvas.create_rectangle(x + 150, y, x + 500, y + 30, fill='black')
        self.canvas.create_text(x + 158, y + 5, text='>> ', font="Ebrima 13 bold", fill='orange', anchor='nw')
        if self.nextSystem == clip:
            self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='green',
                                    anchor='nw')
        else:
            self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='orange',
                                    anchor='nw')
        self.canvas.create_rectangle(x + 340, y, x + 500, y + 30, fill='black')
        # Jump-cooldown countdown, formatted MM:SS; beeps during the final
        # 10 seconds when the alarm setting is on.
        timeSince = time.time() - self.logReader.lastJumpRequest
        timeSince = self.maxCountdown - timeSince
        if timeSince > 0:
            if timeSince < 10 and self.data['alarm']:
                winsound.Beep(3000, 100)
            mins = str(round(timeSince // 60))
            seconds = str(math.floor(timeSince % 60))
            if len(mins) == 1:
                mins = '0' + mins
            if len(seconds) == 1:
                seconds = '0' + seconds
            text = mins + ':' + seconds
        else:
            text = 'Ready'
        text = '| ' + text + ' |'
        self.canvas.create_text(x + 350, y + 5, text=text, font="Ebrima 13 bold", fill='orange', anchor='nw')
        # Glyph "buttons": menu, open-file, settings, pin-toggle, close.
        # Click handling is presumably bound elsewhere (not visible here).
        self.canvas.create_text(x + 420, y + 5, text='☰', font="Ebrima 13 bold", fill='orange', anchor='nw')
        self.canvas.create_text(x + 440, y + 5, text='📁', font="Ebrima 13 bold", fill='orange', anchor='nw')
        self.canvas.create_text(x + 463, y + 5, text='⚙', font="Ebrima 13 bold", fill='orange', anchor='nw')
        if self.data['topmost'] == 1:
            self.canvas.create_text(x + 485, y + 5, text='⮝', font="Ebrima 13 bold", fill='orange', anchor='nw')
        else:
            self.canvas.create_text(x + 485, y + 5, text='⮟', font="Ebrima 13 bold", fill='orange', anchor='nw')
        self.canvas.create_text(x + 500, y + 5, text='✘', font="Ebrima 13 bold", fill='orange', anchor='nw')
        # Top and bottom border lines of the status bar.
        self.canvas.create_line(x, y, x + 520, y, fill='orange')
        self.canvas.create_line(x, y + 30, x + 520, y + 30, fill='orange')
        if self.data['more']:
            self.createDashboard()
def createDashboard(self):
mouseX, mouseY = mousePosition()
x, y = self.data['window position'][0], self.data['window position'][1]
try:
self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
# pannel backgrounds
self.canvas.create_rectangle(x + 10, y + 40, x + 510, y + 150, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 160, x + 510, y + 270, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 280, x + 510, y + 540, fill='#111111', outline='#333333')
horPos = self.position / len(self.currentFileData) * 480 + 20
self.canvas.create_line(x + 20, y + 80, x + horPos, y + 80, fill='orange', width=2, dash=10)
self.canvas.create_line(x + horPos, y + 80, x + 500, y + 80, fill='orange', width=2)
above = False
for i in [0] + self.stopLocations:
horPos = i / len(self.currentFileData) * 480 + 20
if i in self.pristineRings:
colour = '#a1c5ff'
else:
colour = 'orange'
if i in self.stopLocations:
size=3
else:
size=1
if above and (mouseX - (x + horPos)) ** 2 + (mouseY - (y + 80)) ** 2 < size ** 2:
if horPos < 250:
anchor = 'w'
else:
anchor = 'e'
self.canvas.create_line(x + horPos, y + 70, x + horPos, y + 80, fill=colour)
jumps=i - self.position
if jumps > 0:
eta = ' | ' + self.getETA(jumps=jumps)
else:
eta=''
self.canvas.create_text(x + horPos, y + 60,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']] + eta,
font="Ebrima 10 bold", fill=colour, anchor=anchor)
self.canvas.create_oval(x + horPos - size*1.5, y + 80 - size*1.5, x + horPos + size*1.5, y + 80 + size*1.5, fill=colour,
outline=colour)
elif not above:
self.canvas.create_rectangle(x + horPos - 8, y + 80, x + 500, y + 120, fill='#111111',
outline='#111111')
self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
self.canvas.create_text(x + horPos, y + 95,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']],
font="Ebrima 10 bold", fill='orange', anchor='w')
self.canvas.create_oval(x + horPos - size, y + 80 - size, x + horPos + 5, y + 80 + size, fill=colour, outline=colour)
above = True
horPos = 500
jumps = len(self.currentFileData) - 1 - self.position
if jumps > 0:
eta = ' | ' + self.getETA(jumps=jumps)
else:
eta = ''
#self.canvas.create_rectangle(x + horPos - 10, y + 80, x + 500, y + 120, fill='#111111', outline='#111111')
self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
self.canvas.create_text(x + horPos, y + 95,
text=" " + self.currentFileData[-1][self.currentFileDataKeys['System Name']] + eta,
font="Ebrima 10 bold", fill='orange', anchor='e')
##print(self.stopLocations)
self.canvas.create_oval(x + 15, y + 75, x + 25, y + 85, fill='orange', outline='orange')
self.canvas.create_oval(x + 495, y + 75, x + 505, y + 85, fill='orange', outline='orange')
self.canvas.create_text(x + 20, y + 130, text="Jumps | Completed: " + str(self.position),
font="Ebrima 13 bold", fill='orange', anchor='w')
found = False
for i in self.stopLocations:
diff = i - self.position
if diff >= 0:
self.canvas.create_text(x + 220, y + 130, text="| To Waypoint: " + str(diff), font="Ebrima 13 bold",
fill='orange', anchor='w')
found = True
break
if not found:
self.canvas.create_text(x + 220, y + 130,
text="| To Waypoint: " + str(len(self.currentFileData) - self.position - 1),
font="Ebrima 13 bold", fill='orange', anchor='w')
self.canvas.create_text(x + 380, y + 130,
text="| Left: " + str(len(self.currentFileData) - self.position - 1),
font="Ebrima 13 bold", fill='orange', anchor='w')
"""
for i in self.stopLocations + self.pristineRings:
if i in self.stopLocations:
fill = 'orange'
outline = 'orange'
else:
fill = 'orange'
outline = 'orange'
horPos = i / len(self.currentFileData) * 480 + 20
self.canvas.create_oval(x + horPos - 3, y + 77, x + horPos + 3, y + 83, fill=fill, outline=outline)
##print('h',horPos)
##print(self.stopLocations)
"""
horPos = self.position / len(self.currentFileData) * 480 + 20
self.canvas.create_polygon(x + horPos - 5, y + 85, x + horPos, y + 75, x + horPos + 5, y + 85,
fill='#00ff00', outline='#00ff00')
try:
reqFuel = self.currentFileData[self.position][self.currentFileDataKeys['Tritium in market']]
reqFuel = int(reqFuel)
if reqFuel > 0:
reqFuel += 1000
else:
for i in range(self.position, len(self.currentFileData)):
reqFuel += int(self.currentFileData[i][self.currentFileDataKeys['Fuel Used']])
reqFuel -= int(self.currentFileData[self.position][self.currentFileDataKeys['Fuel Used']])
except IndexError:
reqFuel = 'Error'
tankFuel = self.logReader.carrierFuel
shipFuel = self.logReader.shipInventory - self.data['shipCargo']
carrierFuel = self.logReader.carrierInventory - self.data['carrierCargo']
self.canvas.create_text(x + 20, y + 180, text="Tritium | ", font="Ebrima 13 bold", fill='orange',
anchor='w')
self.canvas.create_text(x + 95, y + 180, text="Tank: " + str(tankFuel), font="Ebrima 13 bold", fill='green',
anchor='w')
self.canvas.create_text(x + 190, y + 180, text="| Ship: " + str(shipFuel), font="Ebrima 13 bold",
fill='blue', anchor='w')
self.canvas.create_text(x + 280, y + 180, text="| Cargo: " + str(carrierFuel), font="Ebrima 13 bold",
fill='orange', anchor='w')
self.canvas.create_text(x + 400, y + 180, text="| Min: " + str(reqFuel), font="Ebrima 13 bold", fill='red',
anchor='w')
fuelTotal = tankFuel + shipFuel + carrierFuel
if reqFuel == 'Error':
reqFuel = 0
width = max(fuelTotal, reqFuel) / 480
self.canvas.create_rectangle(x + 20, y + 210, x + 20 + reqFuel / width, y + 230, fill='red', outline='red',
stipple='gray25')
self.canvas.create_rectangle(x + 20, y + 210, x + 20 + tankFuel / width, y + 230, fill='green',
outline='green')
self.canvas.create_rectangle(x + 20 + tankFuel / width, y + 210,
x + 20 + shipFuel / width + tankFuel / width, y + 230, fill='blue',
outline='blue')
self.canvas.create_rectangle(x + 20 + shipFuel / width + tankFuel / width, y + 210,
x + 20 + shipFuel / width + tankFuel / width + carrierFuel / width, y + 230,
fill='orange', outline='orange')
self.canvas.create_rectangle(x + 20 + reqFuel / width - 2, y + 210, x + 20 + reqFuel / width, y + 230,
fill='red', outline='red')
diff = fuelTotal - reqFuel
if diff >= 0:
self.canvas.create_text(x + 260, y + 250, text="You are " + str(diff) + " Tritium in excess",
font="Ebrima 13 bold", fill='green')
else:
self.canvas.create_text(x + 260, y + 250, text="Warning! You are " + str(-diff) + " Tritium short!",
font="Ebrima 13 bold", fill='red')
self.canvas.create_text(x + 260, y + 197,
text="Please note you need to open the carrier management page to update this.",
font="Ebrima 8 bold", fill='orange')
# routeList
length = 10
self.scrollLength = length
verticalSpacing = 25
self.verticalSpacing = verticalSpacing
boxHeight = 20
self.boxHeight = boxHeight
startY = 290
self.scrollHeight = verticalSpacing * (length - 1) + boxHeight
barHeight = min(length / len(self.currentFileData) * self.scrollHeight, self.scrollHeight)
self.barCentre = barHeight / 2
barPosition = y + (self.position + self.scroll) / len(self.currentFileData) * self.scrollHeight + startY
clipboard = pyperclip.paste()
for i in range(length):
if self.position + self.scroll + i < len(self.currentFileData):
if self.currentFileData[self.position + self.scroll + i][
self.currentFileDataKeys['System Name']] == clipboard:
boxFill = 'green'
textFill = 'black'
elif self.scroll + i == 0:
boxFill = 'orange'
textFill = 'black'
elif self.position + self.scroll + i in self.stopLocations or self.position + self.scroll + i - 1 in self.stopLocations:
boxFill = 'red'
textFill = 'black'
else:
boxFill = 'black'
textFill = 'orange'
if self.position + self.scroll + i in self.pristineRings:
textFill = '#a1c5ff'
self.canvas.create_rectangle(x + 15, y + startY + verticalSpacing * i, x + 490,
y + startY + verticalSpacing * i + boxHeight, fill=boxFill,
outline='orange')
self.canvas.create_text(x + 17, y + startY + verticalSpacing * i,
text=self.currentFileData[self.position + self.scroll + i][
self.currentFileDataKeys['System Name']], font="Ebrima 12 bold",
fill=textFill, anchor='nw')
#print(self.scroll + i - self.position)
if self.scroll + i > 0:
#print('eta')
eta = self.getETA(jumps=self.scroll + i)
self.canvas.create_text(x + 490, y + startY + verticalSpacing * i,
text=eta, font="Ebrima 12 bold",
fill=textFill, anchor='ne')
self.canvas.create_rectangle(x + 497, y + startY, x + 505, y + startY + self.scrollHeight, fill='black',
outline='orange')
self.scrollTop = [x + 497, y + startY]
self.scrollBottom = [x + 505, y + startY + verticalSpacing * (length - 1) + boxHeight]
self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + barHeight, fill='orange',
outline='orange')
for i in self.pristineRings:
barPosition = y + i / len(self.currentFileData) * self.scrollHeight + startY
self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + 1, fill='#a1c5ff', outline='#a1c5ff')
for i in self.stopLocations:
barPosition = y + i / len(self.currentFileData) * self.scrollHeight + startY
self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + 1, fill='red', outline='red')
for i in self.pristineRings:
if i in self.stopLocations:
barPosition = y + i / len(self.currentFileData) * self.scrollHeight + startY
self.canvas.create_rectangle(x + 497, barPosition, x + 498, barPosition + 1, fill='#a1c5ff', outline='#a1c5ff')
self.canvas.create_rectangle(x + 499, barPosition, x + 500, barPosition + 1, fill='#a1c5ff',
outline='#a1c5ff')
self.canvas.create_rectangle(x + 501, barPosition, x + 502, barPosition + 1, fill='#a1c5ff',
outline='#a1c5ff')
self.canvas.create_rectangle(x + 503, barPosition, x + 504, barPosition + 1, fill='#a1c5ff',
outline='#a1c5ff')
barPosition = y + self.position / len(self.currentFileData) * self.scrollHeight + startY
self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + 1, fill='orange',
outline='orange')
except Exception as e:
self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
self.canvas.create_text(x + 260, y + 250, text=traceback.format_exc(), font="Ebrima 13 bold", fill='red')
def getETA(self, jumps):
    """Estimate the arrival wall-clock time after `jumps` carrier jumps.

    The daily jump window is read from self.data['jumpStart'] /
    self.data['jumpEnd'] ("HH:MM" strings); jumps are assumed to be
    queued every 21 minutes inside that window.

    Returns the ETA formatted as "HH:MM - DD/MM/YYYY" (UTC).
    """
    start = self.data['jumpStart']
    end = self.data['jumpEnd']
    start = start.split(':')
    end = end.split(':')
    # "HH:MM" -> minutes since midnight
    start = sum([int(val) * 60 ** (1 - i) for i, val in enumerate(start)])
    end = sum([int(val) * 60 ** (1 - i) for i, val in enumerate(end)])
    if end < start:
        # Window wraps past midnight; push the end into the next day.
        end += 1440
    alreadyQueued = time.time() - self.logReader.lastJumpRequest
    if alreadyQueued < 21 * 60:
        # A jump was requested less than one 21-minute cycle ago: count
        # from that request instead of from "now".
        # Fix: build an aware UTC datetime here — utcfromtimestamp() is
        # deprecated and returned a naive value, unlike the else branch.
        now = datetime.datetime.fromtimestamp(self.logReader.lastJumpRequest,
                                              tz=datetime.timezone.utc)
    else:
        now = datetime.datetime.now(datetime.timezone.utc)
    # Midnight of the reference day; minute offsets below are added to it.
    nowReset = now - datetime.timedelta(hours=now.hour, minutes=now.minute)
    todayMinutes = sum([int(val) * 60 ** (1 - i) for i, val in enumerate([now.hour, now.minute])])
    if todayMinutes + 1440 < end and todayMinutes < start:
        # We are before today's (wrapped) window: shift it back a day.
        end -= 1440
        start -= 1440
    todayStart = max(todayMinutes, start)
    # Jumps still possible today / in a full regular day (one per 21 min).
    todayAvailableJumps = (end - todayStart) // 21 + 1
    regularAvailableJumps = (end - start) // 21 + 1
    if jumps <= todayAvailableJumps:
        eta = nowReset + datetime.timedelta(minutes=todayStart + jumps * 21 - 5)
    elif (jumps - todayAvailableJumps) % regularAvailableJumps != 0:
        eta = nowReset + datetime.timedelta(
            minutes=1440 + start
            + (jumps - todayAvailableJumps) // regularAvailableJumps * 1440
            + ((jumps - todayAvailableJumps) % regularAvailableJumps) * 21 - 5)
    else:
        eta = nowReset + datetime.timedelta(
            minutes=1440 + start + ((jumps - todayAvailableJumps) // regularAvailableJumps) * 1440 + 15)
    return eta.strftime("%H:%M - %d/%m/%Y")
def mouseDown(self, values):
    """Left-button press: start a scrollbar drag when the press lands on
    the scrollbar rectangle, otherwise start dragging the whole overlay."""
    self.startDrag = time.time()
    onScrollbar = (self.scrollTop[0] <= values.x <= self.scrollBottom[0]
                   and self.scrollTop[1] <= values.y <= self.scrollBottom[1])
    if onScrollbar and not self.dragging:
        self.scrolling = True
    elif not self.scrolling:
        self.dragging = True
        winX, winY = self.data['window position'][0], self.data['window position'][1]
        self.dragOffset = [values.x - winX, values.y - winY]
def endDrag(self, values):
    # Left-button release. Short presses (<0.3s) on the 30px title bar
    # dispatch toolbar actions by pixel range; short presses on a visible
    # route row copy that system name to the clipboard; otherwise this
    # just ends a window/scrollbar drag. Always finishes with a redraw.
    self.dragging = False
    self.scrolling = False
    # x position relative to the overlay's left edge
    relX = values.x - self.data['window position'][0]
    if time.time() - self.startDrag < 0.3 and values.y - self.data['window position'][1] < 30:
        if relX < 150:
            # Current-system label: copy it.
            pyperclip.copy(self.logReader.currentSystem)
            # print('copied ' + self.logReader.currentSystem + ' to clipboard')
        elif relX > 190 and relX < 340:
            # Next-system label: copy it.
            pyperclip.copy(self.nextSystem)
            # print('copied ' + self.nextSystem + ' to clipboard')
        # more
        elif relX > 420 and relX < 440:
            self.data['more'] = not self.data['more']
            pass
        # open route
        elif relX > 440 and relX < 463:
            self.openFile()
        # settings
        elif relX > 463 and relX < 485:
            self.settings()
            pass
        # minimise
        elif relX > 485 and relX < 500:
            # Toggle topmost 1 <-> 0 and rebuild the window in the new mode.
            self.data['topmost'] = -self.data['topmost'] + 1
            self.createWindow()
        # close
        elif relX > 500 and relX < 520:
            self.exiting = True
            self.saveData()
    elif time.time() - self.startDrag < 0.3 and 15 < relX and 490 > relX and values.y - self.scrollTop[1] > 0 and \
            self.scrollBottom[1] - values.y > 0:
        # Quick click inside the route list: map the y coordinate back to
        # a row index and copy that row's system name.
        proportion = (values.y - self.scrollTop[1]) / self.scrollHeight
        clickedOn = proportion * self.scrollLength
        pyperclip.copy(self.currentFileData[math.floor(self.position + self.scroll + clickedOn)][
            self.currentFileDataKeys['System Name']])
    self.clear()
def wheel(self, values):
    """Mouse-wheel handler: scroll the route list and redraw.

    Does nothing when the whole route already fits in the visible list.
    """
    if self.scrollLength >= len(self.currentFileData):
        return
    self.scroll += round(-values.delta / 100)
    self.limitScroll()
    self.clear()
def limitScroll(self):
    """Clamp self.scroll so the visible window stays inside the route list."""
    if self.scroll + self.position < 0:
        # Can't scroll above the first row.
        self.scroll = -self.position
    lastStart = len(self.currentFileData) - self.scrollLength
    if self.scroll + self.position >= lastStart:
        # Can't scroll past the last full page.
        self.scroll = lastStart - self.position
def hover(self, values):
    """Flag the overlay for redraw when the pointer is within 5px of the
    progress line (80px below the window top) and no drag is active."""
    if self.dragging:
        return
    lineY = self.data['window position'][1] + 80
    if abs(values.y - lineY) < 5:
        self.hovering = True
def createWindow(self, onTop=1):
    # (Re)build the overlay window, destroying any previous windows first.
    # Creates a hidden root Tk (owns the taskbar icon) plus the visible
    # window. Windows-only: uses ctypes.windll for the screen size and a
    # colour-key ("pink") for the transparent-overlay effect.
    # NOTE(review): the onTop parameter is unused — self.data['topmost']
    # decides the mode instead; confirm before removing.
    try:
        self.root.destroy()
        self.window.destroy()
    except AttributeError:
        pass
    except tk._tkinter.TclError:
        pass
    self.hidden = False
    user32 = ctypes.windll.user32
    # Primary screen resolution.
    width, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
    self.root = tk.Tk()
    self.root.title('routeTracker')
    self.root.attributes('-alpha', 0.0)  # For icon
    # self.root.lower()
    self.root.iconify()
    if self.data['topmost'] == 1:
        # Overlay mode: borderless always-on-top Toplevel owned by root.
        self.window = tk.Toplevel(self.root, highlightthickness=0)
        self.root.iconbitmap(ICON)
        setWindowIcon = False
    else:
        # Desktop mode: an ordinary fullscreen Tk window.
        self.window = tk.Tk()
        setWindowIcon = True
    self.window.title('routeTracker')
    self.window.config(bg="pink")
    self.window.geometry(str(width) + "x" + str(height))  # Whatever size
    # self.root.iconbitmap(ICON)
    if self.data['topmost'] == 1:
        self.window.overrideredirect(1)  # Remove border
        self.window.attributes('-topmost', 1)
    else:
        self.window.wm_attributes('-fullscreen', 'true')
        self.root.overrideredirect(1)
    # All "pink" pixels are rendered transparent (Windows colour-key).
    self.window.wm_attributes("-transparentcolor", "pink")
    self.window.bind('<ButtonPress-1>', self.mouseDown)
    self.window.bind('<ButtonRelease-1>', self.endDrag)
    self.window.bind("<MouseWheel>", self.wheel)
    self.window.bind("<Motion>", self.hover)
    self.clear()
    if setWindowIcon:
        self.window.iconbitmap(ICON)
# settings window
def alarm(self):
    """Toggle the jump-countdown alarm setting, persist it, and refresh
    the settings-window button label."""
    self.data['alarm'] = not self.data['alarm']
    self.saveData()
    newLabel = 'Alarm: ' + str(self.data['alarm'])
    self.alarmButton.config(text=newLabel)
def logLocation(self):
    # Ask the user for the journal-log folder via a directory dialog.
    # An empty string (dialog cancelled) falls back to the reader's
    # default location. The choice is persisted and the settings-window
    # label is refreshed to show the folder in effect.
    self.data['logLocation'] = askdirectory()
    # print(self.data['logLocation'])
    if self.data['logLocation'] != '':
        self.logReader.folderLocation = self.data['logLocation']
    else:
        self.logReader.defaultLocation()
    self.saveData()
    self.logLocationLabel.config(text=self.logReader.folderLocation)
def change(self, values):
    """Key-release handler for the settings window.

    Validates every entry field and stores the values that parse into
    self.data, silently ignoring anything invalid, then persists.

    values: the tkinter event object (unused).
    """
    def storeInt(entry, key):
        # Store the entry's text under self.data[key] if it parses as int.
        try:
            self.data[key] = int(entry.get())
        except ValueError:
            pass

    def storeTime(entry, key):
        # Store the entry's text if it is a valid "HH:MM" time string.
        # parts[1] is read before the length check on purpose: a missing
        # colon raises IndexError and is swallowed, as before.
        text = entry.get()
        try:
            parts = text.split(':')
            hours = int(parts[0])
            minutes = int(parts[1])
            if len(parts) == 2 and 0 <= hours < 24 and 0 <= minutes < 60:
                self.data[key] = text
        except (ValueError, IndexError):
            pass

    storeInt(self.carrierGoodsEntry, 'carrierCargo')
    storeInt(self.shipGoodsEntry, 'shipCargo')
    storeTime(self.jumpStartEntry, 'jumpStart')
    storeTime(self.jumpEndEntry, 'jumpEnd')
    self.saveData()
def settings(self):
    """Open (or reopen) the settings window.

    Builds rows for the log-folder picker, the alarm toggle, the carrier
    and ship cargo amounts, the daily jump window ("HH:MM" start/end) and
    a credits button. Entries are validated and saved live through the
    <KeyRelease> binding to self.change.

    Refactor: the seven buttons previously repeated the same nine-option
    constructor; the shared scheme is factored into local helpers.
    """
    try:
        self.settingsWindow.destroy()
    except AttributeError:
        pass
    except tk._tkinter.TclError:
        pass
    # print('settings window does not yet exist')
    self.settingsWindow = tk.Tk()
    self.settingsWindow.title('Settings')
    self.settingsWindow.config(bg='black')

    def makeButton(text, bg, activeBg, width=25, command=None):
        # All settings buttons share the same font/colour scheme; only the
        # label, background shade, width and command differ.
        kwargs = {}
        if command is not None:
            kwargs['command'] = command
        return tk.Button(self.settingsWindow, text=text, font="Ebrima 13 bold",
                         fg='orange', activeforeground='orange', bg=bg,
                         activebackground=activeBg, width=width, **kwargs)

    def makeEntry(initial):
        # Uniform entry widget pre-filled with the stored setting.
        entry = tk.Entry(self.settingsWindow, bg='#222222', fg='orange', bd=0,
                         font="Ebrima 13 bold")
        entry.insert(0, str(initial))
        return entry

    settingsLabel = tk.Label(self.settingsWindow, text='Settings\n', font="Ebrima 15 bold", fg='orange', bg='black')
    settingsLabel.grid(row=0, column=0, columnspan=2)
    # log reader file path
    openBrowserButton = makeButton('Log File Location', '#222222', '#111111', command=self.logLocation)
    openBrowserButton.grid(row=1, column=0)
    self.logLocationLabel = tk.Label(self.settingsWindow, text=self.logReader.folderLocation,
                                     font="Ebrima 15 bold", fg='orange', bg='black')
    self.logLocationLabel.grid(row=1, column=1)
    # alarm
    self.alarmButton = makeButton('Alarm: ' + str(self.data['alarm']), '#333333', '#222222', command=self.alarm)
    self.alarmButton.grid(row=2, column=0)
    # non tritium goods in carrier
    makeButton('Carrier Goods', '#222222', '#111111').grid(row=3, column=0)
    self.carrierGoodsEntry = makeEntry(self.data['carrierCargo'])
    self.carrierGoodsEntry.grid(row=3, column=1)
    # non tritium goods in ship
    makeButton('Ship Goods', '#333333', '#222222').grid(row=4, column=0)
    self.shipGoodsEntry = makeEntry(self.data['shipCargo'])
    self.shipGoodsEntry.grid(row=4, column=1)
    # jump start/end
    makeButton('Start Time', '#333333', '#222222').grid(row=5, column=0)
    self.jumpStartEntry = makeEntry(self.data['jumpStart'])
    self.jumpStartEntry.grid(row=5, column=1)
    makeButton('End Time', '#333333', '#222222').grid(row=6, column=0)
    self.jumpEndEntry = makeEntry(self.data['jumpEnd'])
    self.jumpEndEntry.grid(row=6, column=1)
    # Thanks
    invite = makeButton("With thanks to the Fleet Carrier Owner's Club (Especially Ed, NalloVint and Brandstaetter)",
                        '#222222', '#111111', width=90,
                        command=lambda: webbrowser.open('https://discord.gg/tcMPHfh'))
    invite.grid(row=7, column=0, columnspan=2)
    self.settingsWindow.iconbitmap(ICON)
    self.settingsWindow.bind("<KeyRelease>", self.change)
if __name__ == '__main__':
    # Manual entry point: build the journal reader and start the overlay.
    from logReader import *
    reader = logReader()
    ui = UserInterface(reader=reader)
    # print('t')
    # NOTE(review): looks like a leftover debug call — the return value is
    # discarded; confirm before removing.
    ui.getETA(18)
    ui.mainLoop()
    ##print(countdownMessage)
# window.mainloop()
self.scrollTop = [0, 0]
self.scrollBottom = [0, 0]
try:
with open("trackerData.txt", "rb") as f:
self.data = pickle.load(f)
except FileNotFoundError:
self.data = dataTemplate
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
added = False
dataKeys = list(self.data.keys())
for i in list(dataTemplate.keys()):
if i not in dataKeys:
self.data[i] = dataTemplate[i]
added = True
if added:
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
if "current file" in list(self.data.keys()):
self.currentFile = self.data["current file"]
self.openFile(dialogue=False)
if self.data['logLocation'] != '':
self.logReader.folderLocation = self.data['logLocation']
self.createWindow()
def mainLoop(self):
    # Main polling loop. Drives tkinter manually (root.update() instead of
    # tk mainloop) so the overlay redraw, journal-log polling, drag/scroll
    # state and the settings window are all serviced from one thread.
    # Exits when self.exiting is set (close button).
    timeLoop = time.time()
    while True:
        time.sleep(0.01)
        try:
            # Probe the clipboard first: on Windows this raises while
            # another process holds it; handled at the bottom of the loop.
            pyperclip.paste()
            if self.exiting:
                # Persist state and tear every window down, then leave.
                self.saveData()
                self.window.destroy()
                self.root.destroy()
                try:
                    self.settingsWindow.destroy()
                except AttributeError:
                    pass
                except tk._tkinter.TclError:
                    pass
                break
            # self.menu.update()
            currentTime = time.time()
            if currentTime - self.logStart > self.logCheck and self.currentFileData != None:
                # Periodically re-read the journal for a system change.
                self.logStart = currentTime
                self.logReader.updateLog()
                # print(self.logReader.oldSystem,self.logReader.currentSystem)
                if self.logReader.oldSystem != self.logReader.currentSystem:
                    # print("Jumped to "+self.logReader.currentSystem)
                    self.nextSystem = 'unknown'
                    # Find the new current system in the route and advance.
                    for i in range(self.position, len(self.currentFileData) - 1):
                        ##print(i)
                        ##print(ui.currentFileData[i])
                        if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.logReader.currentSystem:
                            # print('copied ' + self.nextSystem + ' to clipboard')
                            if self.currentFileData[i + 1][self.currentFileDataKeys['System Name']] == \
                                    self.currentFileData[i][self.currentFileDataKeys['System Name']]:
                                # A duplicated row marks a waypoint stop;
                                # skip past the duplicate.
                                self.position = i + 1
                                # print('double')
                            else:
                                self.position = i
                            self.nextSystem = self.currentFileData[self.position + 1][
                                self.currentFileDataKeys['System Name']]
                            # Auto-copy the next destination for the user.
                            pyperclip.copy(self.nextSystem)
                            self.data['route positions'][self.currentFile] = self.position
                            self.saveData()
                            # try:
                            self.clear()
                            """
                            except Exception as e:
                                #print(e)"""
                            break
            # try:
            self.root.update()
            x, y = mousePosition()
            if self.hovering:
                self.clear()
                self.hovering = False
            if self.dragging:
                # Move the overlay with the mouse while dragging.
                self.data['window position'] = [x - self.dragOffset[0], y - self.dragOffset[1]]
                self.clear()
            elif self.scrolling and self.scrollLength < len(self.currentFileData):
                # Track the scrollbar thumb with the mouse.
                proportion = (y - self.barCentre - self.scrollTop[1]) / self.scrollHeight
                self.scroll = round(proportion * len(self.currentFileData) - self.position)
                self.limitScroll()
                self.clear()
            elif currentTime - timeLoop > 1:
                # Fallback refresh once per second keeps the countdown ticking.
                self.clear()
                timeLoop = currentTime
            """
            if self.data['topmost'] == 0:
                if not self.window.focus_displayof():
                    if topSet != 0:
                        self.window.attributes('-topmost', 0)
                        topSet=0
                elif topSet != 1:
                    self.window.attributes('-topmost', 1)
                    topSet=1
            ##print(topSet)
            """
            """
            except Exception as e:
                if e == SystemExit:
                    break
                else:
                    self.exiting=True
                #print(e)"""
            try:
                self.settingsWindow.update()
            except AttributeError:
                pass
            except tk._tkinter.TclError:
                pass
        except pyperclip.PyperclipWindowsException:
            # Clipboard temporarily unavailable; back off and retry.
            time.sleep(2)
def openFile(self, dialogue=True):
    """Load a route CSV into self.currentFileData.

    dialogue: when True, ask the user to pick a file; when False, reload
    self.currentFile (used at start-up).

    Restores the saved route position for the file, turns the header row
    into self.currentFileDataKeys (column name -> index), drops an empty
    row if present, and records waypoint stops (consecutive duplicate
    system names) in self.stopLocations.
    """
    import csv  # local import: only needed when a route is (re)loaded
    self.scroll = 0
    if dialogue:
        self.currentFile = askopenfilename()
        self.data["current file"] = self.currentFile
    if self.currentFile != '':
        if self.currentFile in list(self.data['route positions'].keys()):
            self.position = self.data['route positions'][self.currentFile]
        else:
            self.position = 0
            self.data['route positions'][self.currentFile] = self.position
        self.saveData()
        try:
            # Fix: use a real CSV parser. The previous implementation
            # stripped every '"' character and then split on commas, which
            # mis-split quoted fields that contain commas.
            with open(self.currentFile, 'r', newline='') as f:
                self.currentFileData = [row for row in csv.reader(f)]
            self.currentFileDataKeys = {}
            for i in range(len(self.currentFileData[0])):
                self.currentFileDataKeys[self.currentFileData[0][i]] = i
            del self.currentFileData[0]
            # Drop a blank row (blank line in the file) if present.
            if [] in self.currentFileData:
                self.currentFileData.remove([])
            self.stopLocations = []
            for i in range(len(self.currentFileData) - 1):
                if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.currentFileData[i + 1][
                        self.currentFileDataKeys['System Name']]:
                    self.stopLocations.append(i)
        except FileNotFoundError as e:
            messagebox.showerror("Import Error", e)
        if self.data['showType'] == 'show':
            self.logReader.resetValues()
            self.logStart = 0
        self.createWindow()
def saveData(self, values=None):
    """Pickle the persistent settings/state dict to trackerData.txt.

    values: ignored; present so this can double as a tkinter callback.
    """
    with open("trackerData.txt", "wb") as handle:
        pickle.dump(self.data, handle)
# overlay functions
def clear(self):
    # Redraw the whole overlay from scratch: destroy the old canvas and
    # repaint the title bar (current system, next system, jump countdown,
    # toolbar glyphs). Text is drawn green when it matches the clipboard.
    # all to change with new UI
    try:
        self.canvas.destroy()
    except:
        pass
    clip = pyperclip.paste()
    x, y = self.data['window position'][0], self.data['window position'][1]
    # "pink" is the transparent colour-key set in createWindow.
    self.canvas = tk.Canvas(self.window, bg="pink", bd=0, highlightthickness=0, relief='ridge')
    self.canvas.pack(fill="both", expand=True)
    self.canvas.create_rectangle(x, y, x + 520, y + 30, fill='black')
    if self.logReader.currentSystem == clip:
        self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
                                fill='green', anchor='nw')
    else:
        self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
                                fill='orange', anchor='nw')
    self.canvas.create_rectangle(x + 150, y, x + 500, y + 30, fill='black')
    self.canvas.create_text(x + 158, y + 5, text='>> ', font="Ebrima 13 bold", fill='orange', anchor='nw')
    if self.nextSystem == clip:
        self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='green',
                                anchor='nw')
    else:
        self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='orange',
                                anchor='nw')
    self.canvas.create_rectangle(x + 340, y, x + 500, y + 30, fill='black')
    # Countdown until the next jump can be queued (maxCountdown window).
    timeSince = time.time() - self.logReader.lastJumpRequest
    timeSince = self.maxCountdown - timeSince
    if timeSince > 0:
        if timeSince < 10 and self.data['alarm']:
            # Audible warning over the last 10 seconds (Windows-only beep).
            winsound.Beep(3000, 100)
        mins = str(round(timeSince // 60))
        seconds = str(math.floor(timeSince % 60))
        # Zero-pad to a fixed MM:SS width.
        if len(mins) == 1:
            mins = '0' + mins
        if len(seconds) == 1:
            seconds = '0' + seconds
        text = mins + ':' + seconds
    else:
        text = 'Ready'
    text = '| ' + text + ' |'
    self.canvas.create_text(x + 350, y + 5, text=text, font="Ebrima 13 bold", fill='orange', anchor='nw')
    # Toolbar glyphs: dashboard toggle, open route, settings, pin/unpin, close.
    self.canvas.create_text(x + 420, y + 5, text='☰', font="Ebrima 13 bold", fill='orange', anchor='nw')
    self.canvas.create_text(x + 440, y + 5, text='📁', font="Ebrima 13 bold", fill='orange', anchor='nw')
    self.canvas.create_text(x + 463, y + 5, text='⚙', font="Ebrima 13 bold", fill='orange', anchor='nw')
    if self.data['topmost'] == 1:
        self.canvas.create_text(x + 485, y + 5, text='⮝', font="Ebrima 13 bold", fill='orange', anchor='nw')
    else:
        self.canvas.create_text(x + 485, y + 5, text='⮟', font="Ebrima 13 bold", fill='orange', anchor='nw')
    self.canvas.create_text(x + 500, y + 5, text='✘', font="Ebrima 13 bold", fill='orange', anchor='nw')
    self.canvas.create_line(x, y, x + 520, y, fill='orange')
    self.canvas.create_line(x, y + 30, x + 520, y + 30, fill='orange')
    if self.data['more']:
        # Expanded dashboard below the title bar.
        self.createDashboard()
def createDashboard(self):
    # Draw the expanded dashboard: route progress line with waypoint
    # markers (top panel), tritium fuel budget bar (middle panel) and the
    # scrollable route list with a custom scrollbar (bottom panel). Any
    # drawing error is caught and rendered as a traceback on the overlay.
    mouseX, mouseY = mousePosition()
    x, y = self.data['window position'][0], self.data['window position'][1]
    try:
        self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
        # pannel backgrounds
        self.canvas.create_rectangle(x + 10, y + 40, x + 510, y + 150, fill='#111111', outline='#333333')
        self.canvas.create_rectangle(x + 10, y + 160, x + 510, y + 270, fill='#111111', outline='#333333')
        self.canvas.create_rectangle(x + 10, y + 280, x + 510, y + 540, fill='#111111', outline='#333333')
        above = False
        # Waypoints (plus the route start) along the progress line;
        # hovering within 5px of a marker shows its name and ETA.
        for i in [0] + self.stopLocations:
            horPos = i / len(self.currentFileData) * 480 + 20
            if above and (mouseX - (x + horPos)) ** 2 + (mouseY - (y + 80)) ** 2 < 25:
                # Anchor the popup so it stays inside the panel.
                if horPos < 250:
                    anchor = 'w'
                else:
                    anchor = 'e'
                self.canvas.create_rectangle(x + horPos - 8, y + 45, x + 500, y + 80, fill='#111111',
                                             outline='#111111')
                self.canvas.create_line(x + horPos, y + 70, x + horPos, y + 80, fill='orange')
                jumps = i - self.position
                if jumps > 0:
                    eta = ' | ' + self.getETA(jumps=jumps)
                else:
                    eta = ''
                self.canvas.create_text(x + horPos, y + 60,
                                        text=self.currentFileData[i][self.currentFileDataKeys['System Name']] + eta,
                                        font="Ebrima 10 bold", fill='orange', anchor=anchor)
                self.canvas.create_oval(x + horPos - 5, y + 75, x + horPos + 5, y + 85, fill='orange',
                                        outline='orange')
            elif not above:
                # First marker (route start) always shows its label.
                self.canvas.create_rectangle(x + horPos - 8, y + 80, x + 500, y + 120, fill='#111111',
                                             outline='#111111')
                self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
                self.canvas.create_text(x + horPos, y + 95,
                                        text=self.currentFileData[i][self.currentFileDataKeys['System Name']],
                                        font="Ebrima 10 bold", fill='orange', anchor='w')
            above = True
        # Route end marker at the right-hand edge.
        horPos = 500
        jumps = len(self.currentFileData) - 1 - self.position
        if jumps > 0:
            eta = ' | ' + self.getETA(jumps=jumps)
        else:
            eta = ''
        self.canvas.create_rectangle(x + horPos - 10, y + 80, x + 500, y + 120, fill='#111111', outline='#111111')
        self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
        self.canvas.create_text(x + horPos, y + 95,
                                text=" " + self.currentFileData[-1][self.currentFileDataKeys['System Name']] + eta,
                                font="Ebrima 10 bold", fill='orange', anchor='e')
        ##print(self.stopLocations)
        horPos = self.position / len(self.currentFileData) * 480 + 20
        # Dashed segment = travelled, solid = remaining.
        self.canvas.create_line(x + 20, y + 80, x + horPos, y + 80, fill='orange', width=2, dash=10)
        self.canvas.create_line(x + horPos, y + 80, x + 500, y + 80, fill='orange', width=2)
        self.canvas.create_oval(x + 15, y + 75, x + 25, y + 85, fill='orange', outline='orange')
        self.canvas.create_oval(x + 495, y + 75, x + 505, y + 85, fill='orange', outline='orange')
        self.canvas.create_text(x + 20, y + 130, text="Jumps | Completed: " + str(self.position),
                                font="Ebrima 13 bold", fill='orange', anchor='w')
        # "To Waypoint" shows jumps to the next stop, or to the route end
        # when no stop remains.
        found = False
        for i in self.stopLocations:
            diff = i - self.position
            if diff >= 0:
                self.canvas.create_text(x + 220, y + 130, text="| To Waypoint: " + str(diff), font="Ebrima 13 bold",
                                        fill='orange', anchor='w')
                found = True
                break
        if not found:
            self.canvas.create_text(x + 220, y + 130,
                                    text="| To Waypoint: " + str(len(self.currentFileData) - self.position - 1),
                                    font="Ebrima 13 bold", fill='orange', anchor='w')
        self.canvas.create_text(x + 380, y + 130,
                                text="| Left: " + str(len(self.currentFileData) - self.position - 1),
                                font="Ebrima 13 bold", fill='orange', anchor='w')
        for i in self.stopLocations:
            # NOTE(review): both branches pick identical colours, so the
            # passed/upcoming distinction is currently a no-op — confirm
            # intended colours before simplifying.
            if i < self.position:
                fill = 'orange'
                outline = 'orange'
            else:
                fill = 'orange'
                outline = 'orange'
            horPos = i / len(self.currentFileData) * 480 + 20
            self.canvas.create_oval(x + horPos - 3, y + 77, x + horPos + 3, y + 83, fill=fill, outline=outline)
            ##print('h',horPos)
        ##print(self.stopLocations)
        horPos = self.position / len(self.currentFileData) * 480 + 20
        # Green triangle marks the current position on the line.
        self.canvas.create_polygon(x + horPos - 5, y + 85, x + horPos, y + 75, x + horPos + 5, y + 85,
                                   fill='#00ff00', outline='#00ff00')
        try:
            reqFuel = self.currentFileData[self.position][self.currentFileDataKeys['Tritium in market']]
            reqFuel = int(reqFuel)
            if reqFuel > 0:
                # Market restock figure available; pad with a 1000t margin.
                reqFuel += 1000
            else:
                # No market figure: sum the remaining per-jump fuel costs.
                for i in range(self.position, len(self.currentFileData)):
                    reqFuel += int(self.currentFileData[i][self.currentFileDataKeys['Fuel Used']])
                reqFuel -= int(self.currentFileData[self.position][self.currentFileDataKeys['Fuel Used']])
        except IndexError:
            reqFuel = 'Error'
        # Fuel on hand: carrier tank + ship cargo + carrier cargo, with
        # the user-declared non-tritium cargo amounts subtracted.
        tankFuel = self.logReader.carrierFuel
        shipFuel = self.logReader.shipInventory - self.data['shipCargo']
        carrierFuel = self.logReader.carrierInventory - self.data['carrierCargo']
        self.canvas.create_text(x + 20, y + 180, text="Tritium | ", font="Ebrima 13 bold", fill='orange',
                                anchor='w')
        self.canvas.create_text(x + 95, y + 180, text="Tank: " + str(tankFuel), font="Ebrima 13 bold", fill='green',
                                anchor='w')
        self.canvas.create_text(x + 190, y + 180, text="| Ship: " + str(shipFuel), font="Ebrima 13 bold",
                                fill='blue', anchor='w')
        self.canvas.create_text(x + 280, y + 180, text="| Cargo: " + str(carrierFuel), font="Ebrima 13 bold",
                                fill='orange', anchor='w')
        self.canvas.create_text(x + 400, y + 180, text="| Min: " + str(reqFuel), font="Ebrima 13 bold", fill='red',
                                anchor='w')
        fuelTotal = tankFuel + shipFuel + carrierFuel
        if reqFuel == 'Error':
            reqFuel = 0
        # Stacked bar: scale so the larger of (total, required) spans 480px.
        width = max(fuelTotal, reqFuel) / 480
        self.canvas.create_rectangle(x + 20, y + 210, x + 20 + reqFuel / width, y + 230, fill='red', outline='red',
                                     stipple='gray25')
        self.canvas.create_rectangle(x + 20, y + 210, x + 20 + tankFuel / width, y + 230, fill='green',
                                     outline='green')
        self.canvas.create_rectangle(x + 20 + tankFuel / width, y + 210,
                                     x + 20 + shipFuel / width + tankFuel / width, y + 230, fill='blue',
                                     outline='blue')
        self.canvas.create_rectangle(x + 20 + shipFuel / width + tankFuel / width, y + 210,
                                     x + 20 + shipFuel / width + tankFuel / width + carrierFuel / width, y + 230,
                                     fill='orange', outline='orange')
        # Thin red tick at the required-fuel mark.
        self.canvas.create_rectangle(x + 20 + reqFuel / width - 2, y + 210, x + 20 + reqFuel / width, y + 230,
                                     fill='red', outline='red')
        diff = fuelTotal - reqFuel
        if diff >= 0:
            self.canvas.create_text(x + 260, y + 250, text="You are " + str(diff) + " Tritium in excess",
                                    font="Ebrima 13 bold", fill='green')
        else:
            self.canvas.create_text(x + 260, y + 250, text="Warning! You are " + str(-diff) + " Tritium short!",
                                    font="Ebrima 13 bold", fill='red')
        self.canvas.create_text(x + 260, y + 197,
                                text="Please note you need to open the carrier management page to update this.",
                                font="Ebrima 8 bold", fill='orange')
        # routeList
        length = 10
        self.scrollLength = length
        verticalSpacing = 25
        self.verticalSpacing = verticalSpacing
        boxHeight = 20
        self.boxHeight = boxHeight
        startY = 290
        self.scrollHeight = verticalSpacing * (length - 1) + boxHeight
        barHeight = min(length / len(self.currentFileData) * self.scrollHeight, self.scrollHeight)
        self.barCentre = barHeight / 2
        barPosition = y + (self.position + self.scroll) / len(self.currentFileData) * self.scrollHeight + startY
        clipboard = pyperclip.paste()
        for i in range(length):
            if self.position + self.scroll + i < len(self.currentFileData):
                # Row colour: green = matches clipboard, orange = current
                # row, red = waypoint stop (or the row right after one).
                if self.currentFileData[self.position + self.scroll + i][
                        self.currentFileDataKeys['System Name']] == clipboard:
                    boxFill = 'green'
                    textFill = 'black'
                elif self.scroll + i == 0:
                    boxFill = 'orange'
                    textFill = 'black'
                elif self.position + self.scroll + i in self.stopLocations or self.position + self.scroll + i - 1 in self.stopLocations:
                    boxFill = 'red'
                    textFill = 'black'
                else:
                    boxFill = 'black'
                    textFill = 'orange'
                self.canvas.create_rectangle(x + 15, y + startY + verticalSpacing * i, x + 490,
                                             y + startY + verticalSpacing * i + boxHeight, fill=boxFill,
                                             outline='orange')
                self.canvas.create_text(x + 17, y + startY + verticalSpacing * i,
                                        text=self.currentFileData[self.position + self.scroll + i][
                                            self.currentFileDataKeys['System Name']], font="Ebrima 12 bold",
                                        fill=textFill, anchor='nw')
                #print(self.scroll + i - self.position)
                if self.scroll + i > 0:
                    #print('eta')
                    eta = self.getETA(jumps=self.scroll + i)
                    self.canvas.create_text(x + 490, y + startY + verticalSpacing * i,
                                            text=eta, font="Ebrima 12 bold",
                                            fill=textFill, anchor='ne')
        # Scrollbar track, thumb and markers (red = waypoints, orange =
        # current position); the hit-box is stored for mouseDown/endDrag.
        self.canvas.create_rectangle(x + 497, y + startY, x + 505, y + startY + self.scrollHeight, fill='black',
                                     outline='orange')
        self.scrollTop = [x + 497, y + startY]
        self.scrollBottom = [x + 505, y + startY + verticalSpacing * (length - 1) + boxHeight]
        self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + barHeight, fill='orange',
                                     outline='orange')
        for i in self.stopLocations:
            barPosition = y + i / len(self.currentFileData) * self.scrollHeight + startY
            self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + 1, fill='red', outline='red')
        barPosition = y + self.position / len(self.currentFileData) * self.scrollHeight + startY
        self.canvas.create_rectangle(x + 497, barPosition, x + 505, barPosition + 1, fill='orange',
                                     outline='orange')
    except Exception as e:
        # Paint the traceback onto the overlay rather than crashing the loop.
        self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
        self.canvas.create_text(x + 260, y + 250, text=traceback.format_exc(), font="Ebrima 13 bold", fill='red')
def getETA(self, jumps):
    """Estimate the arrival wall-clock time after `jumps` carrier jumps.

    The daily jump window is read from self.data['jumpStart'] /
    self.data['jumpEnd'] ("HH:MM" strings); jumps are assumed to be
    queued every 21 minutes inside that window.

    Returns the ETA formatted as "HH:MM - DD/MM/YYYY" (UTC).
    """
    start = self.data['jumpStart']
    end = self.data['jumpEnd']
    start = start.split(':')
    end = end.split(':')
    # "HH:MM" -> minutes since midnight
    start = sum([int(val) * 60 ** (1 - i) for i, val in enumerate(start)])
    end = sum([int(val) * 60 ** (1 - i) for i, val in enumerate(end)])
    if end < start:
        # Window wraps past midnight; push the end into the next day.
        end += 1440
    alreadyQueued = time.time() - self.logReader.lastJumpRequest
    if alreadyQueued < 21 * 60:
        # A jump was requested less than one 21-minute cycle ago: count
        # from that request instead of from "now".
        # Fix: build an aware UTC datetime here — utcfromtimestamp() is
        # deprecated and returned a naive value, unlike the else branch.
        now = datetime.datetime.fromtimestamp(self.logReader.lastJumpRequest,
                                              tz=datetime.timezone.utc)
    else:
        now = datetime.datetime.now(datetime.timezone.utc)
    # Midnight of the reference day; minute offsets below are added to it.
    nowReset = now - datetime.timedelta(hours=now.hour, minutes=now.minute)
    todayMinutes = sum([int(val) * 60 ** (1 - i) for i, val in enumerate([now.hour, now.minute])])
    if todayMinutes + 1440 < end and todayMinutes < start:
        # We are before today's (wrapped) window: shift it back a day.
        end -= 1440
        start -= 1440
    todayStart = max(todayMinutes, start)
    # Jumps still possible today / in a full regular day (one per 21 min).
    todayAvailableJumps = (end - todayStart) // 21 + 1
    regularAvailableJumps = (end - start) // 21 + 1
    if jumps <= todayAvailableJumps:
        eta = nowReset + datetime.timedelta(minutes=todayStart + jumps * 21 - 5)
    elif (jumps - todayAvailableJumps) % regularAvailableJumps != 0:
        eta = nowReset + datetime.timedelta(
            minutes=1440 + start
            + (jumps - todayAvailableJumps) // regularAvailableJumps * 1440
            + ((jumps - todayAvailableJumps) % regularAvailableJumps) * 21 - 5)
    else:
        eta = nowReset + datetime.timedelta(
            minutes=1440 + start + ((jumps - todayAvailableJumps) // regularAvailableJumps) * 1440 + 15)
    return eta.strftime("%H:%M - %d/%m/%Y")
def mouseDown(self, values):
    """Left-button press: start a scrollbar drag when the press lands on
    the scrollbar rectangle, otherwise start dragging the whole overlay."""
    self.startDrag = time.time()
    onScrollbar = (self.scrollTop[0] <= values.x <= self.scrollBottom[0]
                   and self.scrollTop[1] <= values.y <= self.scrollBottom[1])
    if onScrollbar and not self.dragging:
        self.scrolling = True
    elif not self.scrolling:
        self.dragging = True
        winX, winY = self.data['window position'][0], self.data['window position'][1]
        self.dragOffset = [values.x - winX, values.y - winY]
def endDrag(self, values):
    # Left-button release. Short presses (<0.3s) on the 30px title bar
    # dispatch toolbar actions by pixel range; short presses on a visible
    # route row copy that system name to the clipboard; otherwise this
    # just ends a window/scrollbar drag. Always finishes with a redraw.
    self.dragging = False
    self.scrolling = False
    # x position relative to the overlay's left edge
    relX = values.x - self.data['window position'][0]
    if time.time() - self.startDrag < 0.3 and values.y - self.data['window position'][1] < 30:
        if relX < 150:
            # Current-system label: copy it.
            pyperclip.copy(self.logReader.currentSystem)
            # print('copied ' + self.logReader.currentSystem + ' to clipboard')
        elif relX > 190 and relX < 340:
            # Next-system label: copy it.
            pyperclip.copy(self.nextSystem)
            # print('copied ' + self.nextSystem + ' to clipboard')
        # more
        elif relX > 420 and relX < 440:
            self.data['more'] = not self.data['more']
            pass
        # open route
        elif relX > 440 and relX < 463:
            self.openFile()
        # settings
        elif relX > 463 and relX < 485:
            self.settings()
            pass
        # minimise
        elif relX > 485 and relX < 500:
            # Toggle topmost 1 <-> 0 and rebuild the window in the new mode.
            self.data['topmost'] = -self.data['topmost'] + 1
            self.createWindow()
        # close
        elif relX > 500 and relX < 520:
            self.exiting = True
            self.saveData()
    elif time.time() - self.startDrag < 0.3 and 15 < relX and 490 > relX and values.y - self.scrollTop[1] > 0 and \
            self.scrollBottom[1] - values.y > 0:
        # Quick click inside the route list: map the y coordinate back to
        # a row index and copy that row's system name.
        proportion = (values.y - self.scrollTop[1]) / self.scrollHeight
        clickedOn = proportion * self.scrollLength
        pyperclip.copy(self.currentFileData[math.floor(self.position + self.scroll + clickedOn)][
            self.currentFileDataKeys['System Name']])
    self.clear()
def wheel(self, values):
    """Mouse-wheel handler: scroll the route list and redraw.

    Does nothing when the whole route already fits in the visible list.
    """
    if self.scrollLength >= len(self.currentFileData):
        return
    self.scroll += round(-values.delta / 100)
    self.limitScroll()
    self.clear()
def limitScroll(self):
    """Clamp self.scroll so the visible window stays inside the route list."""
    if self.scroll + self.position < 0:
        # Can't scroll above the first row.
        self.scroll = -self.position
    lastStart = len(self.currentFileData) - self.scrollLength
    if self.scroll + self.position >= lastStart:
        # Can't scroll past the last full page.
        self.scroll = lastStart - self.position
def hover(self, values):
    """Motion handler: flag hovering when the cursor is near y-offset 80."""
    if self.dragging:
        return
    anchor = self.data['window position'][1] + 80
    if abs(values.y - anchor) < 5:
        self.hovering = True
def createWindow(self, onTop=1):
    """Tear down any existing Tk windows and rebuild the overlay window.

    NOTE(review): the ``onTop`` parameter is never read; the effective
    topmost behaviour is driven by ``self.data['topmost']`` instead —
    confirm before removing.
    """
    try:
        # Destroy stale windows from a previous call; ignore the errors
        # raised when they never existed or were already destroyed.
        self.root.destroy()
        self.window.destroy()
    except AttributeError:
        pass
    except tk._tkinter.TclError:
        pass
    self.hidden = False
    # Query the primary screen size via the Win32 API (Windows-only code path).
    user32 = ctypes.windll.user32
    width, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
    self.root = tk.Tk()
    self.root.title('routeTracker')
    self.root.attributes('-alpha', 0.0)  # For icon
    # self.root.lower()
    self.root.iconify()
    if self.data['topmost'] == 1:
        # Always-on-top mode: the overlay is a Toplevel so the hidden,
        # iconified root keeps the taskbar presence.
        self.window = tk.Toplevel(self.root, highlightthickness=0)
        self.root.iconbitmap(ICON)
        setWindowIcon=False
    else:
        # Fullscreen mode: the overlay is its own Tk instance and gets
        # the icon set at the end of this method.
        self.window = tk.Tk()
        setWindowIcon=True
    self.window.title('routeTracker')
    self.window.config(bg="pink")  # "pink" is keyed out as transparent below
    self.window.geometry(str(width) + "x" + str(height))  # Whatever size
    #self.root.iconbitmap(ICON)
    if self.data['topmost'] == 1:
        self.window.overrideredirect(1)  # Remove border
        self.window.attributes('-topmost', 1)
    else:
        self.window.wm_attributes('-fullscreen', 'true')
        self.root.overrideredirect(1)
    # Everything painted in "pink" becomes transparent (Windows-only attr).
    self.window.wm_attributes("-transparentcolor", "pink")
    self.window.bind('<ButtonPress-1>', self.mouseDown)
    self.window.bind('<ButtonRelease-1>', self.endDrag)
    self.window.bind("<MouseWheel>", self.wheel)
    self.window.bind("<Motion>", self.hover)
    self.clear()
    if setWindowIcon:
        self.window.iconbitmap(ICON)
# settings window
def alarm(self):
    """Toggle the alarm flag, persist it, and refresh the button label."""
    toggled = not self.data['alarm']
    self.data['alarm'] = toggled
    self.saveData()
    self.alarmButton.config(text='Alarm: ' + str(toggled))
def logLocation(self):
    """Ask the user for a log folder and point the log reader at it.

    An empty selection (dialog cancelled) falls back to the reader's
    default location.
    """
    chosen = askdirectory()
    self.data['logLocation'] = chosen
    if chosen == '':
        self.logReader.defaultLocation()
    else:
        self.logReader.folderLocation = chosen
    self.saveData()
    self.logLocationLabel.config(text=self.logReader.folderLocation)
def change(self, values):
    """KeyRelease handler for the settings window: validate and persist fields.

    Each entry is parsed independently; invalid input leaves the stored
    value unchanged. ``values`` is the Tk event object (unused).
    """
    def storeInt(entry, key):
        # Store the entry's text under ``key`` only if it parses as an int.
        try:
            self.data[key] = int(entry.get())
        except ValueError:
            pass

    def storeTime(entry, key):
        # Accept only a strict "HH:MM" string (0-23 hours, 0-59 minutes).
        # The length check runs before indexing, so no IndexError is
        # needed to reject inputs like "5" or "1:2:3".
        text = entry.get()
        parts = text.split(':')
        if len(parts) != 2:
            return
        try:
            hours, minutes = int(parts[0]), int(parts[1])
        except ValueError:
            return
        if 0 <= hours < 24 and 0 <= minutes < 60:
            self.data[key] = text

    storeInt(self.carrierGoodsEntry, 'carrierCargo')
    storeInt(self.shipGoodsEntry, 'shipCargo')
    storeTime(self.jumpStartEntry, 'jumpStart')
    storeTime(self.jumpEndEntry, 'jumpEnd')
    self.saveData()
def settings(self):
    """(Re)build the settings window with all its controls.

    Any previously opened settings window is destroyed first so only
    one instance exists at a time.
    """
    try:
        self.settingsWindow.destroy()
    except AttributeError:
        pass
    except tk._tkinter.TclError:
        pass
        # print('settings window does not yet exist')
    self.settingsWindow = tk.Tk()
    self.settingsWindow.title('Settings')
    self.settingsWindow.config(bg='black')

    def makeButton(text, bg, activebg, width=25, **extra):
        # All settings buttons share the same font/colour scheme; only
        # the label, background pair, width and command vary.
        return tk.Button(self.settingsWindow,
                         text=text,
                         font="Ebrima 13 bold",
                         fg='orange',
                         activeforeground='orange',
                         bg=bg,
                         activebackground=activebg,
                         width=width,
                         **extra)

    def makeEntry(initial):
        # Entries share one style and are pre-filled with the stored value.
        entry = tk.Entry(self.settingsWindow, bg='#222222', fg='orange', bd=0,
                         font="Ebrima 13 bold")
        entry.insert(0, str(initial))
        return entry

    settingsLabel = tk.Label(self.settingsWindow, text='Settings\n',
                             font="Ebrima 15 bold", fg='orange', bg='black')
    settingsLabel.grid(row=0, column=0, columnspan=2)
    # Log reader file path.
    makeButton('Log File Location', '#222222', '#111111',
               command=self.logLocation).grid(row=1, column=0)
    self.logLocationLabel = tk.Label(self.settingsWindow,
                                     text=self.logReader.folderLocation,
                                     font="Ebrima 15 bold",
                                     fg='orange', bg='black')
    self.logLocationLabel.grid(row=1, column=1)
    # Alarm toggle.
    self.alarmButton = makeButton('Alarm: ' + str(self.data['alarm']),
                                  '#333333', '#222222', command=self.alarm)
    self.alarmButton.grid(row=2, column=0)
    # Non-tritium goods in carrier.
    makeButton('Carrier Goods', '#222222', '#111111').grid(row=3, column=0)
    self.carrierGoodsEntry = makeEntry(self.data['carrierCargo'])
    self.carrierGoodsEntry.grid(row=3, column=1)
    # Non-tritium goods in ship.
    makeButton('Ship Goods', '#333333', '#222222').grid(row=4, column=0)
    self.shipGoodsEntry = makeEntry(self.data['shipCargo'])
    self.shipGoodsEntry.grid(row=4, column=1)
    # Jump window start/end times ("HH:MM", validated in self.change).
    makeButton('Start Time', '#333333', '#222222').grid(row=5, column=0)
    self.jumpStartEntry = makeEntry(self.data['jumpStart'])
    self.jumpStartEntry.grid(row=5, column=1)
    makeButton('End Time', '#333333', '#222222').grid(row=6, column=0)
    self.jumpEndEntry = makeEntry(self.data['jumpEnd'])
    self.jumpEndEntry.grid(row=6, column=1)
    # Thanks / community invite link.
    invite = makeButton(
        "With thanks to the Fleet Carrier Owner's Club (Especially Ed, NalloVint and Brandstaetter)",
        '#222222', '#111111', width=90,
        command=lambda: webbrowser.open('https://discord.gg/tcMPHfh'))
    invite.grid(row=7, column=0, columnspan=2)
    self.settingsWindow.iconbitmap(ICON)
    # Re-validate all fields on every key release.
    self.settingsWindow.bind("<KeyRelease>", self.change)
if __name__ == '__main__':
    # Explicit import instead of the original `from logReader import *`
    # star import — only the logReader class is used here.
    from logReader import logReader

    reader = logReader()
    ui = UserInterface(reader=reader)
    ui.getETA(18)
    ui.mainLoop()
| 43.692634
| 141
| 0.481455
| 7,912
| 82,448
| 4.996461
| 0.064206
| 0.034655
| 0.05302
| 0.028331
| 0.965041
| 0.96193
| 0.959729
| 0.95601
| 0.953051
| 0.953051
| 0
| 0.04377
| 0.404776
| 82,448
| 1,886
| 142
| 43.715801
| 0.761528
| 0.033439
| 0
| 0.936484
| 0
| 0.094535
| 0.077708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.025849
| 0.013294
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0ac0c3467e25e61d29d2fbdd76a179f64537e1b2
| 10,804
|
py
|
Python
|
applications/medical_history/tests/test_views.py
|
szypkiwonsz/Physiotherapy-Management-System
|
36decab47890e2f4be259c8796f47324ffad28fe
|
[
"MIT"
] | null | null | null |
applications/medical_history/tests/test_views.py
|
szypkiwonsz/Physiotherapy-Management-System
|
36decab47890e2f4be259c8796f47324ffad28fe
|
[
"MIT"
] | 8
|
2020-08-17T14:36:02.000Z
|
2022-03-12T00:33:50.000Z
|
applications/medical_history/tests/test_views.py
|
szypkiwonsz/Physiotherapy-Management-System
|
36decab47890e2f4be259c8796f47324ffad28fe
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from applications.appointments.models import Appointment, Service
from applications.medical_history.models import MedicalHistory
from applications.office_panel.models import Patient
from applications.users.models import User, UserOffice
class TestOfficeMedicalHistoryViews(TestCase):
    """View tests for the office panel's medical-history CRUD endpoints.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias is
    deprecated and was removed in Python 3.12.
    """

    def setUp(self):
        """Create an office user with one patient, medical history, office,
        service and appointment, plus the URLs under test (all pk=1)."""
        self.client = Client()
        self.medical_history_url = reverse('office_panel:medical_history:list')
        self.make_medical_history_url = reverse('office_panel:medical_history:make')
        self.detail_medical_history_url = reverse('office_panel:medical_history:detail', args=[1])
        self.update_medical_history_url = reverse('office_panel:medical_history:update', args=[1])
        self.delete_medical_history_url = reverse('office_panel:medical_history:delete', args=[1])
        self.patient1 = User.objects.create_user(
            'patient', 'patient@gmail.com', 'patientpassword', is_patient=True
        )
        self.office1 = User.objects.create_user(
            'office', 'office@gmail.com', 'officepassword', is_office=True
        )
        self.office_patient1 = Patient.objects.create(
            owner=self.office1,
            first_name='firstname',
            last_name='lastname',
            email='patient@gmail.com'
        )
        self.medical_history1 = MedicalHistory.objects.create(
            owner=self.office1,
            patient=self.office_patient1,
            description='description',
            recommendations='recommendations',
            date_selected=timezone.now(),
        )
        self.appointment_office1 = UserOffice.objects.create(
            user=self.office1,
            name='name',
            address='address',
            city='City',
            phone_number='000000000',
            website='www.website.com'
        )
        self.service = Service.objects.create(
            office=self.appointment_office1,
            name='Konsultacja',
            duration=10
        )
        # NOTE(review): naive datetimes here may warn under USE_TZ=True —
        # confirm against the project settings.
        self.appointment1 = Appointment.objects.create(
            owner=self.patient1,
            office=self.appointment_office1,
            date=datetime(2012, 1, 13, 16, 0, 0),
            first_name='Kacper',
            last_name='Sawicki',
            date_selected=datetime(2012, 1, 13, 23, 51, 34),
            phone_number='000000000',
            confirmed=False,
            service=self.service
        )

    def test_medical_history_list_GET_not_logged_in(self):
        """Anonymous users are redirected away from the list view."""
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history.html')

    def test_medical_history_list_GET_logged_as_patient(self):
        """Patients may not access the office list view."""
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history.html')

    def test_medical_history_list_GET_logged_as_office(self):
        """Offices see the list view rendered with its template."""
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'medical_history/office/medical_history.html')

    def test_make_medical_history_create_GET_not_logged_in(self):
        response = self.client.get(self.make_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_make_medical_history_create_GET_logged_as_patient(self):
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.make_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_make_medical_history_create_GET_logged_as_office(self):
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.make_medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_make_medical_history_create_POST(self):
        """A valid POST creates a second MedicalHistory and redirects."""
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.post(self.make_medical_history_url, {
            'patient': self.office_patient1.pk,
            'appointment': self.appointment1.pk,
            'description': 'description',
            'recommendations': 'recommendations'
        })
        medical_history2 = MedicalHistory.objects.get(id=2)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(medical_history2.description, 'description')

    def test_medical_history_detail_GET_not_logged_in(self):
        response = self.client.get(self.detail_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_medical_history_detail_GET_logged_as_patient(self):
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.detail_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_medical_history_detail_GET_logged_as_office(self):
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.detail_medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'medical_history/office/medical_history_detail_form.html')

    def test_medical_history_update_GET_not_logged_in(self):
        response = self.client.get(self.update_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_update_form.html')

    def test_medical_history_update_GET_logged_as_patient(self):
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.update_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_update_form.html')

    def test_medical_history_update_GET_logged_as_office(self):
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.update_medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'medical_history/office/medical_history_update_form.html')

    def test_medical_history_update_POST(self):
        """A valid POST updates the record's recommendations and redirects."""
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.post(self.update_medical_history_url, {
            'patient': self.medical_history1.patient.pk,
            'appointment': self.appointment1.pk,
            'description': self.medical_history1.description,
            'recommendations': 'newrecommendations'
        })
        medical_history_update = MedicalHistory.objects.get(id=1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(medical_history_update.recommendations, 'newrecommendations')

    def test_medical_history_delete_GET_not_logged_in(self):
        response = self.client.get(self.delete_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_delete_confirm.html')

    def test_medical_history_delete_GET_logged_as_patient(self):
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.delete_medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/office/medical_history_delete_confirm.html')

    def test_medical_history_delete_GET_logged_as_office(self):
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.delete_medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'medical_history/office/medical_history_delete_confirm.html')

    def test_medical_history_delete_POST(self):
        """Posting to the delete view removes the record: detail 200 -> 404."""
        self.client.login(username='office@gmail.com', password='officepassword')
        response_with_post = self.client.get(self.detail_medical_history_url)
        self.assertEqual(response_with_post.status_code, 200)
        self.client.post(self.delete_medical_history_url)
        response_with_deleted_post = self.client.get(self.detail_medical_history_url)
        self.assertEqual(response_with_deleted_post.status_code, 404)
class TestPatientMedicalHistoryViews(TestCase):
    """View tests for the patient panel's medical-history list.

    Uses ``assertEqual``: the ``assertEquals`` alias is deprecated and
    was removed in Python 3.12.
    """

    def setUp(self):
        """Create one patient and one office user plus the list URL."""
        self.client = Client()
        self.medical_history_url = reverse('patient_panel:medical_history:list')
        self.patient1 = User.objects.create_user(
            'patient', 'patient@gmail.com', 'patientpassword', is_patient=True
        )
        self.office1 = User.objects.create_user(
            'office', 'office@gmail.com', 'officepassword', is_office=True
        )

    def test_medical_history_GET_not_logged_in(self):
        """Anonymous users are redirected away from the patient list."""
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/patient/medical_history.html')

    def test_medical_history_GET_logged_as_office(self):
        """Office accounts may not access the patient list."""
        self.client.login(username='office@gmail.com', password='officepassword')
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 302)
        self.assertTemplateNotUsed(response, 'medical_history/patient/medical_history.html')

    def test_medical_history_GET_logged_as_patient(self):
        """Patients see the list rendered with its template."""
        self.client.login(username='patient@gmail.com', password='patientpassword')
        response = self.client.get(self.medical_history_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'medical_history/patient/medical_history.html')
| 46.973913
| 106
| 0.727231
| 1,234
| 10,804
| 6.079417
| 0.090762
| 0.177286
| 0.065716
| 0.045321
| 0.805918
| 0.771394
| 0.749667
| 0.746334
| 0.712877
| 0.695415
| 0
| 0.015606
| 0.175583
| 10,804
| 229
| 107
| 47.179039
| 0.826653
| 0
| 0
| 0.483516
| 0
| 0
| 0.188819
| 0.105054
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.126374
| false
| 0.104396
| 0.043956
| 0
| 0.181319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
7c1d59a937cc24395907fc29662cf9001dae8e62
| 24,142
|
py
|
Python
|
lang/python/github/com/metaprov/modelaapi/services/k8score/v1/k8score_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 5
|
2022-02-18T03:40:10.000Z
|
2022-03-01T16:11:24.000Z
|
lang/python/github/com/metaprov/modelaapi/services/k8score/v1/k8score_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-01-07T19:59:25.000Z
|
2022-02-04T01:21:14.000Z
|
lang/python/github/com/metaprov/modelaapi/services/k8score/v1/k8score_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-03-25T10:21:43.000Z
|
2022-03-25T10:21:43.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from github.com.metaprov.modelaapi.services.k8score.v1 import k8score_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2
class CoreK8sServiceStub(object):
    """///////////////////////////// Jobs
    The Core K8s service is used to list and get a kubernetes object
    """

    # NOTE: generated by the gRPC Python protocol compiler — do not edit
    # by hand; regenerate from the .proto instead.

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC; the request serializer and
        # response deserializer come from the generated k8score_pb2 module.
        self.ListK8sSecrets = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sSecrets',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretsRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretResponse.FromString,
                )
        self.GetK8sSecret = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sSecret',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretResponse.FromString,
                )
        self.ListK8sServices = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sServices',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesResponse.FromString,
                )
        self.GetK8sService = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sService',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceResponse.FromString,
                )
        self.ListK8sDeployments = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sDeployments',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsResponse.FromString,
                )
        self.GetK8sDeployment = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sDeployment',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentResponse.FromString,
                )
        self.ListK8sPods = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sPods',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsResponse.FromString,
                )
        self.GetK8sPod = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sPod',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodResponse.FromString,
                )
        self.ListK8sJobs = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sJobs',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsResponse.FromString,
                )
        self.GetK8sJob = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sJob',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobResponse.FromString,
                )
        self.ListEvents = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListEvents',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsResponse.FromString,
                )
class CoreK8sServiceServicer(object):
    """///////////////////////////// Jobs
    The Core K8s service is used to list and get a kubernetes object
    """

    # NOTE: generated by the gRPC Python protocol compiler — every method
    # is an UNIMPLEMENTED placeholder; a real service subclasses this and
    # overrides the methods it serves.

    def ListK8sSecrets(self, request, context):
        """//////////////// secret
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetK8sSecret(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListK8sServices(self, request, context):
        """//////////////// service
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetK8sService(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListK8sDeployments(self, request, context):
        """//////////////// deployment
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetK8sDeployment(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListK8sPods(self, request, context):
        """//////////////// pod
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetK8sPod(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListK8sJobs(self, request, context):
        """////////////// jobs
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetK8sJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListEvents(self, request, context):
        """Events
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_CoreK8sServiceServicer_to_server(servicer, server):
    """Register a CoreK8sServiceServicer's RPC handlers on a grpc.Server.

    Generated by the gRPC Python protocol compiler — do not edit by hand.
    Maps each RPC name to the servicer method plus the pb2 request
    deserializer / response serializer.
    """
    rpc_method_handlers = {
            'ListK8sSecrets': grpc.unary_unary_rpc_method_handler(
                    servicer.ListK8sSecrets,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretsRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretResponse.SerializeToString,
            ),
            'GetK8sSecret': grpc.unary_unary_rpc_method_handler(
                    servicer.GetK8sSecret,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretResponse.SerializeToString,
            ),
            'ListK8sServices': grpc.unary_unary_rpc_method_handler(
                    servicer.ListK8sServices,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesResponse.SerializeToString,
            ),
            'GetK8sService': grpc.unary_unary_rpc_method_handler(
                    servicer.GetK8sService,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceResponse.SerializeToString,
            ),
            'ListK8sDeployments': grpc.unary_unary_rpc_method_handler(
                    servicer.ListK8sDeployments,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsResponse.SerializeToString,
            ),
            'GetK8sDeployment': grpc.unary_unary_rpc_method_handler(
                    servicer.GetK8sDeployment,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentResponse.SerializeToString,
            ),
            'ListK8sPods': grpc.unary_unary_rpc_method_handler(
                    servicer.ListK8sPods,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsResponse.SerializeToString,
            ),
            'GetK8sPod': grpc.unary_unary_rpc_method_handler(
                    servicer.GetK8sPod,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodResponse.SerializeToString,
            ),
            'ListK8sJobs': grpc.unary_unary_rpc_method_handler(
                    servicer.ListK8sJobs,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsResponse.SerializeToString,
            ),
            'GetK8sJob': grpc.unary_unary_rpc_method_handler(
                    servicer.GetK8sJob,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobResponse.SerializeToString,
            ),
            'ListEvents': grpc.unary_unary_rpc_method_handler(
                    servicer.ListEvents,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class CoreK8sService(object):
    """Convenience client stub for the CoreK8sService unary RPCs.

    The Core K8s service is used to list and get Kubernetes objects
    (secrets, services, deployments, pods, jobs and cluster events).

    NOTE: generated gRPC code. Every method is a one-shot invocation that
    forwards to the EXPERIMENTAL ``grpc.experimental.unary_unary`` API,
    passing the fully-qualified method path plus the request serializer and
    response deserializer from the generated ``k8score_pb2`` module.
    Do not edit by hand; argument order matters to the experimental API.
    """

    @staticmethod
    def ListK8sSecrets(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListK8sSecrets unary RPC on *target* and return its response."""
        # NOTE: response type is ListSecretResponse (singular) — matches the proto.
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sSecrets',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretsRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListSecretResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetK8sSecret(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetK8sSecret unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sSecret',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetSecretResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListK8sServices(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListK8sServices unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sServices',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListServicesResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetK8sService(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetK8sService unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sService',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetServiceResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListK8sDeployments(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListK8sDeployments unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sDeployments',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListDeploymentsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetK8sDeployment(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetK8sDeployment unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sDeployment',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetDeploymentResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListK8sPods(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListK8sPods unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sPods',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListPodsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetK8sPod(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetK8sPod unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sPod',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetPodResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListK8sJobs(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListK8sJobs unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListK8sJobs',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListJobsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetK8sJob(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetK8sJob unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/GetK8sJob',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.GetJobResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListEvents(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the ListEvents unary RPC on *target* and return its response."""
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.k8score.v1.CoreK8sService/ListEvents',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_k8score_dot_v1_dot_k8score__pb2.ListEventsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 58.455206
| 173
| 0.729268
| 2,538
| 24,142
| 6.420016
| 0.055162
| 0.082239
| 0.049343
| 0.061679
| 0.905057
| 0.905057
| 0.905057
| 0.876764
| 0.873266
| 0.856327
| 0
| 0.021746
| 0.203794
| 24,142
| 412
| 174
| 58.597087
| 0.825929
| 0.041339
| 0
| 0.51462
| 1
| 0
| 0.105867
| 0.077879
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.005848
| 0.032164
| 0.116959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7cc476a97e9b98a41c4aa644f079f9efd306de3a
| 17,895
|
py
|
Python
|
sdk/python/pulumi_aws/batch/compute_environment.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/batch/compute_environment.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/batch/compute_environment.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ComputeEnvironment(pulumi.CustomResource):
    """An AWS Batch compute environment: the Amazon ECS container instances
    that run containerized batch jobs.

    > **Note:** to prevent a race condition during environment deletion,
    set ``depends_on`` to the related ``iam.RolePolicyAttachment``;
    otherwise the policy may be destroyed too soon and the environment
    gets stuck in the ``DELETING`` state.

    This content is derived from
    https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/batch_compute_environment.html.markdown.
    """
    arn: pulumi.Output[str]
    """The Amazon Resource Name (ARN) of the compute environment."""
    compute_environment_name: pulumi.Output[str]
    compute_environment_name_prefix: pulumi.Output[str]
    """Creates a unique compute environment name beginning with the specified prefix. Conflicts with `compute_environment_name`."""
    compute_resources: pulumi.Output[dict]
    """Details of the compute resources managed by the compute environment (required for managed
    environments). Nested keys include `allocationStrategy`, `bidPercentage`, `desiredVcpus`,
    `ec2KeyPair`, `imageId`, `instanceRole`, `instanceTypes`, `launch_template`
    (`launchTemplateId`/`launchTemplateName`/`version`), `maxVcpus`, `minVcpus`,
    `securityGroupIds`, `spotIamFleetRole`, `subnets`, `tags` and `type` (`EC2` or `SPOT`)."""
    ecs_cluster_arn: pulumi.Output[str]
    """The ARN of the underlying Amazon ECS cluster used by the compute environment."""
    service_role: pulumi.Output[str]
    """The full ARN of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf."""
    state: pulumi.Output[str]
    """The state of the compute environment, `ENABLED` or `DISABLED`. Defaults to `ENABLED`."""
    status: pulumi.Output[str]
    """The current status of the compute environment (for example, CREATING or VALID)."""
    status_reason: pulumi.Output[str]
    """A short, human-readable string describing the current status of the compute environment."""
    type: pulumi.Output[str]
    """The type of compute environment. Valid items are `EC2` or `SPOT`."""

    def __init__(__self__, resource_name, opts=None, compute_environment_name=None, compute_environment_name_prefix=None, compute_resources=None, service_role=None, state=None, type=None, __props__=None, __name__=None, __opts__=None):
        """Create an AWS Batch ComputeEnvironment resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] compute_environment_name: Name of the compute environment.
        :param pulumi.Input[str] compute_environment_name_prefix: Unique name prefix; conflicts with
               `compute_environment_name`.
        :param pulumi.Input[dict] compute_resources: Compute-resource details (required for managed
               environments); see the class attribute doc for the nested keys.
        :param pulumi.Input[str] service_role: Full ARN of the IAM role AWS Batch uses on your behalf
               (required).
        :param pulumi.Input[str] state: `ENABLED` or `DISABLED`; defaults to `ENABLED`.
        :param pulumi.Input[str] type: `EC2` or `SPOT` (required).
        :raises TypeError: on invalid opts, on `__props__` without `opts.id`, or on a missing
                required property.
        """
        # Honour the deprecated __name__/__opts__ aliases before anything else.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__

        opts = pulumi.ResourceOptions() if opts is None else opts
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()

        if opts.id is None:
            # Creating a new resource: validate required inputs and assemble the
            # property bag (output-only properties are reserved as None).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            if service_role is None:
                raise TypeError("Missing required property 'service_role'")
            if type is None:
                raise TypeError("Missing required property 'type'")
            __props__ = {
                'compute_environment_name': compute_environment_name,
                'compute_environment_name_prefix': compute_environment_name_prefix,
                'compute_resources': compute_resources,
                'service_role': service_role,
                'state': state,
                'type': type,
                'arn': None,
                'ecs_cluster_arn': None,
                'status': None,
                'status_reason': None,
            }
        super(ComputeEnvironment, __self__).__init__(
            'aws:batch/computeEnvironment:ComputeEnvironment',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, arn=None, compute_environment_name=None, compute_environment_name_prefix=None, compute_resources=None, ecs_cluster_arn=None, service_role=None, state=None, status=None, status_reason=None, type=None):
        """Get an existing ComputeEnvironment resource's state by name and provider id.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to look up.
        :param pulumi.ResourceOptions opts: Options for the resource.

        The remaining keyword arguments optionally qualify the lookup and mirror
        the class attributes documented above.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties are optional qualifiers for the lookup.
        __props__ = {
            "arn": arn,
            "compute_environment_name": compute_environment_name,
            "compute_environment_name_prefix": compute_environment_name_prefix,
            "compute_resources": compute_resources,
            "ecs_cluster_arn": ecs_cluster_arn,
            "service_role": service_role,
            "state": state,
            "status": status,
            "status_reason": status_reason,
            "type": type,
        }
        return ComputeEnvironment(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        """Map a provider-side (camelCase) property name to its Python (snake_case) form."""
        translated = tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop

    def translate_input_property(self, prop):
        """Map a Python (snake_case) property name to its provider-side (camelCase) form."""
        translated = tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
        return translated if translated else prop
| 83.232558
| 412
| 0.711316
| 2,381
| 17,895
| 5.222176
| 0.122218
| 0.102783
| 0.055734
| 0.027747
| 0.818482
| 0.804488
| 0.796767
| 0.787196
| 0.774489
| 0.769262
| 0
| 0.003878
| 0.207488
| 17,895
| 214
| 413
| 83.621495
| 0.872867
| 0.535345
| 0
| 0.027778
| 1
| 0
| 0.165056
| 0.041003
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.013889
| 0.083333
| 0.027778
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7cf3bbbd277cee29004fcf2fed4b4d40e9bc02be
| 11,274
|
py
|
Python
|
test/features/test_actions_publiques.py
|
loic-simon/lg-rez
|
c5eb8af7b2146fca110ee50a9209529f0ecfca1a
|
[
"MIT"
] | 4
|
2020-08-13T17:07:51.000Z
|
2021-04-21T00:29:33.000Z
|
test/features/test_actions_publiques.py
|
loic-simon/lg-rez
|
c5eb8af7b2146fca110ee50a9209529f0ecfca1a
|
[
"MIT"
] | 9
|
2021-03-22T00:52:18.000Z
|
2021-10-05T23:46:38.000Z
|
test/features/test_actions_publiques.py
|
loic-simon/lg-rez
|
c5eb8af7b2146fca110ee50a9209529f0ecfca1a
|
[
"MIT"
] | 1
|
2020-10-17T15:04:23.000Z
|
2020-10-17T15:04:23.000Z
|
import unittest
from unittest import mock
from lgrez import config, bdd
from lgrez.features import actions_publiques
from test import mock_discord, mock_bdd, mock_env
class TestActionsPubliques(unittest.IsolatedAsyncioTestCase):
"""Unit tests for lgrez.features.actions_publiques commands."""
def setUp(self):
mock_discord.mock_config()
self.cog = actions_publiques.ActionsPubliques(config.bot)
def tearDown(self):
mock_discord.unmock_config()
    @mock_bdd.patch_db  # Empty database for this method
    @mock.patch("lgrez.config.Channel.haros.send")
    @mock.patch("lgrez.config.Channel.debats.send")
    @mock_discord.interact()
    async def test_haro(self, send_debats_patch, send_haros_patch):
        """Unit tests for !haro command.

        Walks the command through its refusal branches (no vote open, dead or
        immortal target), an aborted confirmation, and the successful paths,
        checking the CandidHaro rows created and the channel sends each time.
        """
        # Command under test: async def haro(self, ctx, *, cible=None)
        haro = self.cog.haro
        mock_bdd.add_campsroles()
        joueur = bdd.Joueur(discord_id=1, chan_id_=11, nom="Joueur1",
                            vote_condamne_=None)
        joueur.add()
        cible = bdd.Joueur(discord_id=2, chan_id_=21, nom="Joueur2")
        cible.add()
        # No condemnation vote open -> command refused
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        await ctx.invoke()
        ctx.assert_sent("Pas de vote")
        self.assertFalse(bdd.CandidHaro.query.all())  # no haro created
        send_debats_patch.assert_not_called()
        send_haros_patch.assert_not_called()
        # cible is dead -> refused
        joueur.vote_condamne_ = "bzzzt"
        cible.statut = bdd.Statut.mort
        cible.update()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        await ctx.invoke()
        ctx.assert_sent("pas assez souffert")
        self.assertFalse(bdd.CandidHaro.query.all())  # no haro created
        send_debats_patch.assert_not_called()
        send_haros_patch.assert_not_called()
        # cible is immortal -> refused
        cible.statut = bdd.Statut.immortel
        cible.update()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        await ctx.invoke()
        ctx.assert_sent("Comment oses-tu")
        self.assertFalse(bdd.CandidHaro.query.all())  # no haro created
        send_debats_patch.assert_not_called()
        send_haros_patch.assert_not_called()
        # cible is MV (mort-vivant): haro allowed, but user declines at the check
        cible.statut = bdd.Statut.MV
        cible.update()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message(".")),
                ("yes_no", False)):
            await ctx.invoke()
        # empty strings: only the number/order of sends is checked here
        ctx.assert_sent("quelle est la raison", "", "")
        self.assertFalse(bdd.CandidHaro.query.all())  # no haro created
        send_debats_patch.assert_not_called()
        send_haros_patch.assert_not_called()
        # Living target: ok, and abort at the confirmation step
        cible.statut = bdd.Statut.vivant
        cible.update()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("pakontan")),
                ("yes_no", False)):
            # give reason then abort (answer "no" at check)
            await ctx.invoke()
        ctx.assert_sent("quelle est la raison",
                        "tout bon ?",
                        "Mission aborted")
        # The recap embed is attached to the second send (the "tout bon ?" one)
        embed = ctx.send.call_args_list[1].kwargs["embed"]
        self.assertIn("contre Joueur2", embed.title)
        self.assertIn("pakontan", embed.description)
        self.assertIn("Joueur1", embed.author.name)
        self.assertFalse(bdd.CandidHaro.query.all())  # no haro created
        send_debats_patch.assert_not_called()
        send_haros_patch.assert_not_called()
        # Validate the haro
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("pakontan")),
                ("yes_no", True)):
            # give reason then confirm (answer "yes" at check)
            await ctx.invoke()
        ctx.assert_sent("quelle est la raison",
                        "tout bon ?",
                        "c'est parti")
        embed = ctx.send.call_args_list[1].kwargs["embed"]
        haros = bdd.CandidHaro.query.all()
        self.assertEqual(len(haros), 2)  # 2 haros created (both players)
        self.assertEqual({haro.joueur for haro in haros}, {joueur, cible})
        self.assertEqual(haros[0].type, bdd.CandidHaroType.haro)
        self.assertEqual(haros[1].type, bdd.CandidHaroType.haro)
        send_haros_patch.assert_called_once()
        # The haros channel receives the same embed shown at the check
        self.assertEqual(send_haros_patch.call_args.kwargs, {"embed": embed})
        send_haros_patch.reset_mock()
        send_debats_patch.assert_called_once()
        send_debats_patch.reset_mock()
        # Validate again, but the haroted player is already registered
        bdd.CandidHaro.delete(*haros)
        self.assertFalse(bdd.CandidHaro.query.all())  # no haros
        bdd.CandidHaro(joueur=cible, type=bdd.CandidHaroType.haro).add()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("pakontan")),
                ("yes_no", True)):
            # give reason then confirm (answer "yes" at check)
            await ctx.invoke()
        ctx.assert_sent("", "", "")
        haros = bdd.CandidHaro.query.all()
        self.assertEqual(len(haros), 2)  # still 2 rows: no duplicate created
        self.assertEqual({haro.joueur for haro in haros}, {joueur, cible})
        self.assertEqual(haros[0].type, bdd.CandidHaroType.haro)
        self.assertEqual(haros[1].type, bdd.CandidHaroType.haro)
        send_haros_patch.assert_called_once()
        send_haros_patch.reset_mock()
        send_debats_patch.assert_called_once()
        send_debats_patch.reset_mock()
        # Validate again, but the haroter is already registered
        bdd.CandidHaro.delete(*haros)
        self.assertFalse(bdd.CandidHaro.query.all())  # no haros
        bdd.CandidHaro(joueur=joueur, type=bdd.CandidHaroType.haro).add()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("pakontan")),
                ("yes_no", True)):
            # give reason then confirm (answer "yes" at check)
            await ctx.invoke()
        ctx.assert_sent("", "", "")
        haros = bdd.CandidHaro.query.all()
        self.assertEqual(len(haros), 2)  # still 2 rows: no duplicate created
        self.assertEqual({haro.joueur for haro in haros}, {joueur, cible})
        self.assertEqual(haros[0].type, bdd.CandidHaroType.haro)
        self.assertEqual(haros[1].type, bdd.CandidHaroType.haro)
        send_haros_patch.assert_called_once()
        send_haros_patch.reset_mock()
        send_debats_patch.assert_called_once()
        send_debats_patch.reset_mock()
        # Validate again, with both haroter and haroted already registered
        bdd.CandidHaro.delete(*haros)
        self.assertFalse(bdd.CandidHaro.query.all())  # no haros
        bdd.CandidHaro(joueur=joueur, type=bdd.CandidHaroType.haro).add()
        bdd.CandidHaro(joueur=cible, type=bdd.CandidHaroType.haro).add()
        ctx = mock_discord.get_ctx(haro, cible="Joueur2", _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("pakontan")),
                ("yes_no", True)):
            # give reason then confirm (answer "yes" at check)
            await ctx.invoke()
        ctx.assert_sent("", "", "")
        haros = bdd.CandidHaro.query.all()
        self.assertEqual(len(haros), 2)  # still 2 rows: no duplicate created
        self.assertEqual({haro.joueur for haro in haros}, {joueur, cible})
        self.assertEqual(haros[0].type, bdd.CandidHaroType.haro)
        self.assertEqual(haros[1].type, bdd.CandidHaroType.haro)
        send_haros_patch.assert_called_once()
        send_haros_patch.reset_mock()
        send_debats_patch.assert_called_once()
        send_debats_patch.reset_mock()
    @mock_bdd.patch_db # Empty database for this method
    @mock.patch("lgrez.config.Channel.haros.send")
    @mock.patch("lgrez.config.Channel.debats.send")
    @mock_discord.interact()
    async def test_candid(self, send_debats_patch, send_candids_patch):
        """Unit tests for !candid command."""
        # Command under test: async def candid(self, ctx)
        # NOTE(review): mock.patch decorators inject bottom-up, so
        # send_debats_patch = Channel.debats.send and
        # send_candids_patch = Channel.haros.send — presumably
        # candidacies are announced in the #haros channel; confirm.
        candid = self.cog.candid
        mock_bdd.add_campsroles()
        joueur = bdd.Joueur(discord_id=1, chan_id_=11, nom="Joueur1",
                            vote_maire_=None)
        joueur.add()
        # Case 1: no mayor vote in progress -> command refuses
        ctx = mock_discord.get_ctx(candid, _caller_id=1)
        await ctx.invoke()
        ctx.assert_sent("Pas de vote")
        self.assertFalse(bdd.CandidHaro.query.all())  # no candid created
        send_debats_patch.assert_not_called()
        send_candids_patch.assert_not_called()
        # Case 2: player already candidated -> refused, no duplicate
        joueur.vote_maire_ = "bzzzt"
        cand = bdd.CandidHaro(joueur=joueur,
                              type=bdd.CandidHaroType.candidature)
        cand.add()
        ctx = mock_discord.get_ctx(candid, _caller_id=1)
        await ctx.invoke()
        ctx.assert_sent("déjà présenté")
        self.assertEqual(len(bdd.CandidHaro.query.all()), 1)  # none created
        send_debats_patch.assert_not_called()
        send_candids_patch.assert_not_called()
        # Case 3: vote open, not yet candidated, but abort at confirmation
        cand.delete()
        ctx = mock_discord.get_ctx(candid, _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("votépourmoi")),
                ("yes_no", False)):
            # give reason then abort (answer "no" at check)
            await ctx.invoke()
        ctx.assert_sent("Quel est ton programme",
                        "tout bon ?",
                        "Mission aborted")
        # Second send (index 1) carries the candidacy recap embed
        embed = ctx.send.call_args_list[1].kwargs["embed"]
        self.assertIn("Joueur1", embed.title)
        self.assertIn("votépourmoi", embed.description)
        self.assertIn("Joueur1", embed.author.name)
        self.assertFalse(bdd.CandidHaro.query.all())  # no candid created
        send_debats_patch.assert_not_called()
        send_candids_patch.assert_not_called()
        # Case 4: vote open, confirm -> candidacy registered and announced
        ctx = mock_discord.get_ctx(candid, _caller_id=1)
        with mock_discord.interact(
                ("wait_for_message_here", ctx.new_message("votépourmoi")),
                ("yes_no", True)):
            # give program then validate (answer "yes" at check)
            await ctx.invoke()
        ctx.assert_sent("Quel est ton programme",
                        "tout bon ?",
                        "c'est parti")
        embed = ctx.send.call_args_list[1].kwargs["embed"]
        candids = bdd.CandidHaro.query.all()
        self.assertEqual(len(candids), 1)  # 1 candid created
        self.assertEqual(candids[0].joueur, joueur)
        self.assertEqual(candids[0].type, bdd.CandidHaroType.candidature)
        send_candids_patch.assert_called_once()
        # The announcement embed sent to the channel is the recap embed
        self.assertEqual(send_candids_patch.call_args.kwargs, {"embed": embed})
        send_candids_patch.reset_mock()
        send_debats_patch.assert_called_once()
        send_debats_patch.reset_mock()
| 43.528958
| 79
| 0.626752
| 1,367
| 11,274
| 4.943672
| 0.112655
| 0.04232
| 0.044392
| 0.049719
| 0.830275
| 0.823468
| 0.823025
| 0.789583
| 0.786919
| 0.786919
| 0
| 0.007535
| 0.258382
| 11,274
| 258
| 80
| 43.697674
| 0.800742
| 0.090296
| 0
| 0.774038
| 0
| 0
| 0.083539
| 0.028997
| 0
| 0
| 0
| 0
| 0.370192
| 1
| 0.009615
| false
| 0
| 0.024038
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b0235507612ab5a34c7a0e17512b97c66e0cf5f
| 16,502
|
py
|
Python
|
backend/resource/migrations/0001_initial.py
|
sebaskun/budget_app
|
08d67be17a512d91ee49a4e9088c32392410be46
|
[
"Unlicense"
] | null | null | null |
backend/resource/migrations/0001_initial.py
|
sebaskun/budget_app
|
08d67be17a512d91ee49a4e9088c32392410be46
|
[
"Unlicense"
] | null | null | null |
backend/resource/migrations/0001_initial.py
|
sebaskun/budget_app
|
08d67be17a512d91ee49a4e9088c32392410be46
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 2.0.2 on 2019-05-29 07:08
import backend.core.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``resource`` app.

    Auto-generated by Django's ``makemigrations``. Do not reorder or
    edit operations by hand: the recorded migration state must match
    exactly what was applied to existing databases.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # created_by/modified_by FKs target the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Vacuna.ubicacion references 'core.GruposVarios'.
        ('core', '0001_initial'),
    ]

    operations = [
        # Equipment categories; 'padre' is a self-referencing parent FK,
        # so categories form a hierarchy.
        migrations.CreateModel(
            name='CategoryEquipment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('name', models.CharField(max_length=150, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('image', models.ImageField(blank=True, null=True, upload_to='image_category', verbose_name='imagen')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categoryequipment_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categoryequipment_modified', to=settings.AUTH_USER_MODEL)),
                ('padre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='category_padre', to='resource.CategoryEquipment')),
            ],
            options={
                'verbose_name': 'categoría de equipo',
                'verbose_name_plural': 'categorías de equipos',
                'ordering': ('name',),
            },
        ),
        # Manpower (labor) categories — flat, no parent link.
        migrations.CreateModel(
            name='CategoryManpower',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('name', models.CharField(max_length=150, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('image', models.ImageField(blank=True, null=True, upload_to='image_category', verbose_name='imagen')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categorymanpower_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categorymanpower_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'categoría de mano de obra',
                'verbose_name_plural': 'categorías de mano de obra',
                'ordering': ('name',),
            },
        ),
        # Material categories — flat; note: no default ordering here,
        # unlike the two category models above.
        migrations.CreateModel(
            name='CategoryMaterial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('name', models.CharField(max_length=150, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('image', models.ImageField(blank=True, null=True, upload_to='image_category', verbose_name='imagen')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categorymaterial_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categorymaterial_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'categoría de material',
                'verbose_name_plural': 'categorías de materiales',
            },
        ),
        # Equipment resource: cost data plus operating hours, power and
        # fuel attributes. 'code' uses the project's CharNullField so
        # blank codes are stored as NULL and don't break the unique index.
        migrations.CreateModel(
            name='Equipment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('code', backend.core.utils.CharNullField(blank=True, max_length=30, null=True, unique=True, verbose_name='código')),
                ('name', models.CharField(max_length=200, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('unit', models.CharField(blank=True, max_length=15, null=True, verbose_name='unidad de costo')),
                ('currency', models.CharField(choices=[('S', 'S/'), ('D', '$')], default='D', max_length=1, verbose_name='moneda')),
                ('cost', models.DecimalField(decimal_places=6, default=0, max_digits=15, verbose_name='costo unitario')),
                ('pull_in_new_budget', models.BooleanField(default=False)),
                ('hours_equipment_operation', models.DecimalField(decimal_places=6, default=8, max_digits=15, verbose_name='horas de operación del equipo')),
                ('potencia', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True, verbose_name='potencia del equipo')),
                ('image', models.ImageField(blank=True, null=True, upload_to='equipment', verbose_name='image')),
                ('has_combustible', models.BooleanField(default=False)),
                ('tipo_combustible', models.CharField(blank=True, choices=[('GO', 'GasOil'), ('GS', 'Gasolina')], max_length=2, null=True, verbose_name='tipo de combustible')),
                ('is_deleted', models.BooleanField(default=False)),  # soft delete flag
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='resource.CategoryEquipment')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='equipment_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='equipment_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'equipo',
                'verbose_name_plural': 'equipos',
            },
        ),
        # Manpower (labor) resource: cost fields plus salary, cost type
        # (direct/indirect) and labor-regime classification.
        migrations.CreateModel(
            name='Manpower',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('code', backend.core.utils.CharNullField(blank=True, max_length=30, null=True, unique=True, verbose_name='código')),
                ('name', models.CharField(max_length=200, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('unit', models.CharField(blank=True, max_length=15, null=True, verbose_name='unidad de costo')),
                ('currency', models.CharField(choices=[('S', 'S/'), ('D', '$')], default='D', max_length=1, verbose_name='moneda')),
                ('cost', models.DecimalField(decimal_places=6, default=0, max_digits=15, verbose_name='costo unitario')),
                ('pull_in_new_budget', models.BooleanField(default=False)),
                ('sueldo_bruto', models.DecimalField(decimal_places=6, default=0, max_digits=15, verbose_name='sueldo bruto')),
                ('type_cost', models.CharField(choices=[('D', 'DIRECTO'), ('I', 'INDIRECTO')], default='D', max_length=1, verbose_name='tipo de costo')),
                ('allows_ratio', models.PositiveIntegerField(default=1, verbose_name='Permite coeficiente?')),
                ('tipo_regimen', models.CharField(choices=[('C', 'COMÚN'), ('V', 'CIVIL')], default='C', max_length=1, verbose_name='tipo de regimen')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='manpower_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='manpower_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'mano de obra',
                'verbose_name_plural': 'manos de obra',
                'ordering': ('code', 'name'),
            },
        ),
        # Material resource: cost fields plus cost-class choices and a
        # subcontract flag; categorised via CategoryMaterial.
        migrations.CreateModel(
            name='Material',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('code', backend.core.utils.CharNullField(blank=True, max_length=30, null=True, unique=True, verbose_name='código')),
                ('name', models.CharField(max_length=200, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('unit', models.CharField(blank=True, max_length=15, null=True, verbose_name='unidad de costo')),
                ('currency', models.CharField(choices=[('S', 'S/'), ('D', '$')], default='D', max_length=1, verbose_name='moneda')),
                ('cost', models.DecimalField(decimal_places=6, default=0, max_digits=15, verbose_name='costo unitario')),
                ('pull_in_new_budget', models.BooleanField(default=False)),
                ('image', models.ImageField(blank=True, null=True, upload_to='material', verbose_name='image')),
                ('class_cost', models.CharField(choices=[('S', 'Estandar'), ('M', 'Medico'), ('W', 'Taller'), ('O', 'Operativo'), ('V', 'Varios'), ('I', 'Insumo médico'), ('E', 'EPP')], default='S', max_length=1, verbose_name='clase de costo')),
                ('is_subcontract', models.BooleanField(default=False)),
                ('is_deleted', models.BooleanField(default=False)),  # soft delete flag
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='resource.CategoryMaterial')),
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='material_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='material_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'material',
                'verbose_name_plural': 'materiales',
                'ordering': ['name', 'code'],
            },
        ),
        # Subcontract resource: same base cost fields, no category link.
        migrations.CreateModel(
            name='Subcontract',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('code', backend.core.utils.CharNullField(blank=True, max_length=30, null=True, unique=True, verbose_name='código')),
                ('name', models.CharField(max_length=200, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('unit', models.CharField(blank=True, max_length=15, null=True, verbose_name='unidad de costo')),
                ('currency', models.CharField(choices=[('S', 'S/'), ('D', '$')], default='D', max_length=1, verbose_name='moneda')),
                ('cost', models.DecimalField(decimal_places=6, default=0, max_digits=15, verbose_name='costo unitario')),
                ('pull_in_new_budget', models.BooleanField(default=False)),
                ('is_deleted', models.BooleanField(default=False)),  # soft delete flag
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subcontract_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subcontract_modified', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'subcontrato',
                'verbose_name_plural': 'subcontratos',
                'ordering': ['name', 'code'],
            },
        ),
        # Vacuna: named material kit/template; 'ubicacion' is restricted
        # to GruposVarios rows whose grupo == 'UBC'.
        migrations.CreateModel(
            name='Vacuna',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('nombre', models.CharField(max_length=50, unique=True, verbose_name='nombre')),
                ('moneda', models.CharField(choices=[('S', 'S/'), ('D', '$')], default='D', max_length=1, verbose_name='moneda')),
                ('is_deleted', models.BooleanField(default=False)),  # soft delete flag
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacuna_created', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacuna_modified', to=settings.AUTH_USER_MODEL)),
                ('ubicacion', models.ForeignKey(blank=True, limit_choices_to={'grupo': 'UBC'}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ubicaciones_vacuna', to='core.GruposVarios', verbose_name='ubicacion')),
            ],
            options={
                'abstract': False,
            },
        ),
        # VacunaDetail: line items of a Vacuna (material + quantity);
        # uniqueness of (vacuna, material) is enforced below.
        migrations.CreateModel(
            name='VacunaDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('quantity', models.DecimalField(decimal_places=6, default=1, max_digits=15, verbose_name='cantidad')),
                ('observacion', models.TextField(blank=True, null=True, verbose_name='observacion')),
                ('is_deleted', models.BooleanField(default=False)),  # soft delete flag
                ('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacunadetail_created', to=settings.AUTH_USER_MODEL)),
                ('material', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='material_vacunas_budget', to='resource.Material', verbose_name='vacunas material')),
                ('modified_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacunadetail_modified', to=settings.AUTH_USER_MODEL)),
                ('vacuna', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='details_vacuna', to='resource.Vacuna', verbose_name='Vacuna')),
            ],
            options={
                'ordering': ('material',),
            },
        ),
        # Each material may appear at most once per vacuna.
        migrations.AlterUniqueTogether(
            name='vacunadetail',
            unique_together={('vacuna', 'material')},
        ),
    ]
| 76.046083
| 245
| 0.629318
| 1,797
| 16,502
| 5.59488
| 0.102949
| 0.079869
| 0.040581
| 0.054705
| 0.81291
| 0.789238
| 0.755719
| 0.749552
| 0.745276
| 0.71663
| 0
| 0.007585
| 0.217004
| 16,502
| 216
| 246
| 76.398148
| 0.770529
| 0.002727
| 0
| 0.569378
| 1
| 0
| 0.173017
| 0.017928
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019139
| 0
| 0.038278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b1181a42095f821851ef03c45db417fb3dc1acf
| 93,188
|
py
|
Python
|
venv/lib/python3.7/site-packages/slackclient_cli/slack_api_specific.py
|
gigumbrajaguru/SlackTats
|
77fe93711ae3502f833bf3ba340f36d7da82f9cf
|
[
"Apache-2.0"
] | 2
|
2018-10-04T06:12:38.000Z
|
2021-08-02T16:39:12.000Z
|
venv/lib/python3.7/site-packages/slackclient_cli/slack_api_specific.py
|
gigumbrajaguru/SlackTats
|
77fe93711ae3502f833bf3ba340f36d7da82f9cf
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.7/site-packages/slackclient_cli/slack_api_specific.py
|
gigumbrajaguru/SlackTats
|
77fe93711ae3502f833bf3ba340f36d7da82f9cf
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:16:58.000Z
|
2021-11-05T22:16:58.000Z
|
# If there is an API that understands the specification this code will be very short and it will dynamically change with specification change
def get():
return {
# unavailable: slackclient needs token
# "api.test": [
# {
# "Argument": "error",
# "Required": "Optional",
# "Description": "Error response to return"
# },
# {
# "Argument": "foo",
# "Required": "Optional",
# "Description": "example property to return"
# },
# ],
"auth.revoke": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token."
},
{
"Argument": "test",
"Required": "Optional",
"Description": "Setting this parameter to 1 triggers a testing mode where the specified token will not actually be revoked.",
},
],
"auth.test": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: identify"
},
],
"bots.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token.Requires scope: users:read"
},
{
"Argument": "bot",
"Required": "Optional",
"Description": "Bot user to get info on",
},
],
"channels.archive": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to archive"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to archive"
},
],
"channels.create": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "Name of channel to create"
},
],
"channels.history": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to fetch history for."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to fetch history for."
},
{
"Argument": "latest",
"Required": "Optional",
"Description": "End of time range of messages to include in results. default=now"
},
{
"Argument": "oldest",
"Required": "Optional",
"Description": "Start of time range of messages to include in results. default=0"
},
{
"Argument": "inclusive",
"Required": "Optional",
"Description": "Include messages with latest or oldest timestamp in results. default=0"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of messages to return, between 1 and 1000. default=100"
},
{
"Argument": "unreads",
"Required": "Optional",
"Description": "Include unread_count_display in the output? default=0"
},
],
"channels.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token.Requires scope: channels:read"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to get info on"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to get info on"
},
],
"channels.invite": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to invite user to."
},
{
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to invite user to."
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to invite to channel."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to invite to channel."
},
],
"channels.join": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "Name of channel to join"
},
],
"channels.kick": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to remove user from."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to remove user from."
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to remove from channel."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to remove from channel."
},
],
"channels.leave": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to leave"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to leave"
},
],
"channels.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token.Requires scope: channels:read"
},
{
"Argument": "exclude_archived",
"Required": "Optional",
"Description": "Don't return archived channels. default=false"
},
],
"channels.mark": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to set reading cursor in."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to set reading cursor in."
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the most recently seen message."
},
],
"channels.rename": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to rename"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to rename"
},
{
"Argument": "name",
"Required": "Required",
"Description": "New name for channel."
},
],
"channels.replies": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to fetch thread from"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to fetch thread from"
},
{
"Argument": "thread_ts",
"Required": "Required",
"Description": "Unique identifier of a thread's parent message"
},
],
"channels.setPurpose": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to set the purpose of"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to set the purpose of"
},
{
"Argument": "purpose",
"Required": "Required",
"Description": "The new purpose"
},
],
"channels.setTopic": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to set the topic of"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to set the topic of"
},
{
"Argument": "topic",
"Required": "Required",
"Description": "The new topic"
},
],
"channels.unarchive": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: channels:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to unarchive"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to unarchive"
},
],
"chat.delete": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token.Requires scope: chat:write:bot or chat:write:user"
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the message to be updated."
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID containing the message to be updated."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name containing the message to be updated."
},
{
"Argument": "as_user",
"Required": "Optional",
"Description": "Pass true to update the message as the authed user. Bot users in this context are considered authed users."
},
],
"chat.meMessage": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: chat:write:user"
},
{
"Argument": "channel",
"Required": "Required",
"Description": "Channel to send message to. Can be a public channel, private group or IM channel. Can be an encoded ID, or a name."
},
{
"Argument": "text",
"Required": "Required",
"Description": "Text of the message to send."
},
],
"chat.postMessage": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: chat:write:bot or chat:write:user"
},
{
"Argument": "channel",
"Required": "Required",
"Description": "Channel, private group, or IM channel to send message to. Can be an encoded ID, or a name. See below for more details."
},
{
"Argument": "text",
"Required": "Required",
"Description": "Text of the message to send. See below for an explanation of formatting. This field is usually required, unless you're providing only attachments instead."
},
{
"Argument": "parse",
"Required": "Optional",
"Description": "Text of the message to send. See below for an explanation of formatting. This field is usually required, unless you're providing only attachments instead.Change how messages are treated. Defaults to none. See below."
},
{
"Argument": "link_names",
"Required": "Optional",
"Description": "Find and link channel names and usernames."
},
{
"Argument": "attachments",
"Required": "Optional",
"Description": "Structured message attachments."
},
{
"Argument": "unfurl_links",
"Required": "Optional",
"Description": "Pass true to enable unfurling of primarily text-based content."
},
{
"Argument": "unfurl_media",
"Required": "Optional",
"Description": "Pass false to disable unfurling of media content."
},
{
"Argument": "username",
"Required": "Optional",
"Description": "Set your bot's user name. Must be used in conjunction with as_user set to false, otherwise ignored. See authorship below."
},
{
"Argument": "as_user",
"Required": "Optional",
"Description": "Pass true to post the message as the authed user, instead of as a bot. Defaults to false. See authorship below."
},
{
"Argument": "icon_url",
"Required": "Optional",
"Description": "URL to an image to use as the icon for this message. Must be used in conjunction with as_user set to false, otherwise ignored. See authorship below."
},
{
"Argument": "icon_emoji",
"Required": "Optional",
"Description": "Emoji to use as the icon for this message. Overrides icon_url. Must be used in conjunction with as_user set to false, otherwise ignored. See authorship below."
},
{
"Argument": "thread_ts",
"Required": "Optional",
"Description": "Provide another message's ts value to make this message a reply. Avoid using a reply's ts value; use its parent instead."
},
{
"Argument": "reply_broadcast",
"Required": "Optional",
"Description": "Used in conjunction with thread_ts and indicates whether reply should be made visible to everyone in the channel or conversation. Defaults to false."
},
],
"chat.update": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token.Requires scope: chat:write:bot or chat:write:user"
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the message to be updated."
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID containing the message to be updated."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name containing the message to be updated."
},
{
"Argument": "text",
"Required": "Required",
"Description": "New text for the message, using the default formatting rules."
},
{
"Argument": "attachments",
"Required": "Optional",
"Description": "Structured message attachments."
},
{
"Argument": "parse",
"Required": "Optional",
"Description": "Change how messages are treated. Defaults to client, unlike chat.postMessage."
},
{
"Argument": "link_names",
"Required": "Optional",
"Description": "Find and link channel names and usernames. Defaults to none. This parameter should be used in conjunction with parse. To set link_names to 1, specify a parse mode of full."
},
{
"Argument": "as_user",
"Required": "Optional",
"Description": "Pass true to update the message as the authed user. Bot users in this context are considered authed users."
},
],
"dnd.endDnd": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: dnd:write"
},
],
"dnd.endSnooze": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: dnd:write"
},
],
"dnd.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: dnd:read"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "User ID to fetch status for (defaults to current user)"
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to fetch status for (defaults to current user)"
},
],
"dnd.setSnooze": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: dnd:write"
},
{
"Argument": "num_minutes",
"Required": "Required",
"Description": "Number of minutes, from now, to snooze until."
},
],
"dnd.teamInfo": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: dnd:read"
},
{
"Argument": "users",
"Required": "Optional",
"Description": "Comma-separated list of user IDs to fetch Do Not Disturb status for"
},
{
"extra_function": "user_names_to_ids",
"Argument": "ex-users",
"Required": "Optional",
"Description": "Comma-separated list of user names to fetch Do Not Disturb status for"
},
],
"emoji.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: emoji:read"
},
],
"files.comments.add": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "File to add a comment to."
},
{
"Argument": "comment",
"Required": "Required",
"Description": "Text of the comment to add."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel id (encoded) of which location to associate with the new comment."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name of which location to associate with the new comment."
},
],
"files.comments.delete": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "File to delete a comment from."
},
{
"Argument": "id",
"Required": "Required",
"Description": "The comment to delete."
},
],
"files.comments.edit": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "File containing the comment to edit."
},
{
"Argument": "id",
"Required": "Required",
"Description": "The comment to edit."
},
{
"Argument": "comment",
"Required": "Required",
"Description": "Text of the comment to edit."
},
],
"files.delete": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "ID of file to delete."
},
],
"files.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:read"
},
{
"Argument": "file",
"Required": "Required",
"Description": "Specify a file by providing its ID."
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=100"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"files.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:read"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "Filter files created by a single user ID."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "Filter files created by a single user name."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Filter files appearing in a specific channel, indicated by its ID."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Filter files appearing in a specific channel, indicated by its name."
},
{
"Argument": "ts_from",
"Required": "Optional",
"Description": "Filter files created after this timestamp (inclusive). default=0"
},
{
"Argument": "ts_to",
"Required": "Optional",
"Description": "Filter files created before this timestamp (inclusive). default=now"
},
{
"Argument": "types",
"Required": "Optional",
"Description": "Filter files by type: all - All files, spaces - Posts, snippets - Snippets, images - Image files, gdocs - Google docs, zips - Zip files, pdfs - PDF files. You can pass multiple values in the types argument, like types=spaces,snippets. The default value is all, which does not filter the list. default=all"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=100"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"files.revokePublicURL": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "File to revoke"
},
],
"files.sharedPublicURL": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Required",
"Description": "File to share"
},
],
"files.upload": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: files:write:user"
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File contents via multipart/form-data. If omitting this parameter, you must submit content."
},
{
"Argument": "content",
"Required": "Optional",
"Description": "File contents via a POST variable. If omitting this parameter, you must provide a file."
},
{
"extra_function": "file_upload_by_filename",
"Argument": "ex-content",
"Required": "Optional",
"Description": "File name to be uploaded. This is a proprietary option, and the content of the file is included in the 'content' argument"
},
{
"Argument": "filetype",
"Required": "Optional",
"Description": "A file type identifier."
},
{
"Argument": "filename",
"Required": "Optional",
"Description": "Filename of file"
},
{
"Argument": "title",
"Required": "Optional",
"Description": "Title of file."
},
{
"Argument": "initial_comment",
"Required": "Optional",
"Description": "Initial comment to add to file."
},
{
"Argument": "channels",
"Required": "Optional",
"Description": "Comma-separated list of channel names or IDs where the file will be shared."
},
{
"extra_function": "channel_names_to_ids",
"Argument": "ex-channels",
"Required": "Optional",
"Description": "Comma-separated list of channel names or IDs where the file will be shared."
},
],
"groups.archive": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to archive"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to archive"
},
],
"groups.close": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to close."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to close."
},
],
"groups.create": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "Name of private channel to create"
},
],
"groups.createChild": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to clone and archive."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to clone and archive."
},
],
"groups.history": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to fetch history for."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to fetch history for."
},
{
"Argument": "latest",
"Required": "Optional",
"Description": "End of time range of messages to include in results. default=now"
},
{
"Argument": "oldest",
"Required": "Optional",
"Description": "Start of time range of messages to include in results. default=0"
},
{
"Argument": "inclusive",
"Required": "Optional",
"Description": "Include messages with latest or oldest timestamp in results. default=0"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of messages to return, between 1 and 1000. default=100"
},
{
"Argument": "unreads",
"Required": "Optional",
"Description": "Include unread_count_display in the output? default=0"
},
],
"groups.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:read"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to get info on"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to get info on"
},
],
"groups.invite": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to invite user to."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to invite user to."
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to invite."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to invite."
},
],
"groups.kick": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to remove user from."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to remove user from."
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to remove from private channel."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to remove from private channel."
},
],
"groups.leave": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to leave"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to leave"
},
],
"groups.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:read"
},
{
"Argument": "exclude_archived",
"Required": "Optional",
"Description": "Don't return archived private channels. default=0"
},
],
"groups.mark": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to set reading cursor in."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to set reading cursor in."
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the most recently seen message."
},
],
"groups.open": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to open."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to open."
},
],
"groups.rename": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to rename."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to rename."
},
{
"Argument": "name",
"Required": "Required",
"Description": "New name for private channel."
},
],
"groups.replies": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to fetch thread from"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to fetch thread from"
},
{
"Argument": "thread_ts",
"Required": "Required",
"Description": "Unique identifier of a thread's parent message"
},
],
"groups.setPurpose": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to set the purpose of"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to set the purpose of"
},
{
"Argument": "purpose",
"Required": "Required",
"Description": "The new purpose"
},
],
"groups.setTopic": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to set the topic of"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to set the topic of"
},
{
"Argument": "topic",
"Required": "Required",
"Description": "The new topic"
},
],
"groups.unarchive": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: groups:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Private channel ID to unarchive"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Private channel name to unarchive"
},
],
"im.close": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Direct message channel ID to close."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Direct message channel name to close."
},
],
"im.history": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Direct message channel ID to fetch history for."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Direct message channel name to fetch history for."
},
{
"Argument": "latest",
"Required": "Optional",
"Description": "End of time range of messages to include in results. default=now"
},
{
"Argument": "oldest",
"Required": "Optional",
"Description": "Start of time range of messages to include in results. default=0"
},
{
"Argument": "inclusive",
"Required": "Optional",
"Description": "Include messages with latest or oldest timestamp in results. default=0"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of messages to return, between 1 and 1000. default=100"
},
{
"Argument": "unreads",
"Required": "Optional",
"Description": "Include unread_count_display in the output? default=0"
},
],
"im.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:read"
},
],
"im.mark": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Direct message channel ID to set reading cursor in"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Direct message channel name to set reading cursor in"
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the most recently seen message."
},
],
"im.open": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:write"
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to open a direct message channel with."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to open a direct message channel with."
},
{
"Argument": "return_im",
"Required": "Optional",
"Description": "Boolean, indicates you want the full IM channel definition in the response."
},
],
"im.replies": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: im:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Direct message channel ID to fetch thread from"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Direct message channel name to fetch thread from"
},
{
"Argument": "thread_ts",
"Required": "Required",
"Description": "Unique identifier of a thread's parent message"
},
],
"mpim.close": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "MPIM to close."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "MPIM channel name to close."
},
],
"mpim.history": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Multiparty direct message (channel id) to fetch history for."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Multiparty direct message (channel name) to fetch history for."
},
{
"Argument": "latest",
"Required": "Optional",
"Description": "End of time range of messages to include in results. default=now"
},
{
"Argument": "oldest",
"Required": "Optional",
"Description": "Start of time range of messages to include in results. default=0"
},
{
"Argument": "inclusive",
"Required": "Optional",
"Description": "Include messages with latest or oldest timestamp in results. default=0"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of messages to return, between 1 and 1000. default=100"
},
{
"Argument": "unreads",
"Required": "Optional",
"Description": "Include unread_count_display in the output? default=0"
},
],
"mpim.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:read"
},
],
"mpim.mark": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "multiparty direct message channel ID to set reading cursor in."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "multiparty direct message channel name to set reading cursor in."
},
{
"Argument": "ts",
"Required": "Required",
"Description": "Timestamp of the most recently seen message."
},
],
"mpim.open": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:write"
},
{
"Argument": "users",
# "Required": "Required",
"Required": "Optional",
"Description": "Comma separated lists of user IDs. The ordering of the users is preserved whenever a MPIM group is returned."
},
{
"extra_function": "user_names_to_ids",
"Argument": "ex-users",
"Required": "Optional",
"Description": "Comma separated lists of user names. The ordering of the users is preserved whenever a MPIM group is returned."
},
],
"mpim.replies": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: mpim:history"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Multiparty direct message channel ID to fetch thread from."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Multiparty direct message channel name to fetch thread from."
},
{
"Argument": "thread_ts",
"Required": "Required",
"Description": "Unique identifier of a thread's parent message."
},
],
"oauth.access": [
{
"Argument": "client_id",
"Required": "Required",
"Description": "Issued when you created your application."
},
{
"Argument": "client_secret",
"Required": "Required",
"Description": "Issued when you created your application."
},
{
"Argument": "code",
"Required": "Required",
"Description": "The code param returned via the OAuth callback."
},
{
"Argument": "redirect_uri",
"Required": "Optional",
"Description": "This must match the originally submitted URI (if one was sent)."
},
],
"pins.add": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: pins:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to pin the item in."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to pin the item in."
},
{
"Argument": "file",
"Required": "Optional",
"Description": "Channel to pin the item in."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to pin."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to pin."
},
],
"pins.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: pins:read"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID to get pinned items for."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to get pinned items for."
},
],
"pins.remove": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: pins:write"
},
{
"Argument": "channel",
# "Required": "Required",
"Required": "Optional",
"Description": "Channel ID where the item is pinned to."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name where the item is pinned to."
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to un-pin."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to un-pin."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to un-pin."
},
],
"reactions.add": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reactions:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "Reaction (emoji) name."
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to add reaction to."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to add reaction to."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel ID where the message to add reaction to was posted."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name where the message to add reaction to was posted."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to add reaction to."
},
],
"reactions.get": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reactions:read"
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to get reactions for."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to get reactions for."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel ID where the message to get reactions for was posted."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name where the message to get reactions for was posted."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to get reactions for."
},
{
"Argument": "full",
"Required": "Optional",
"Description": "If true always return the complete reaction list."
},
],
"reactions.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reactions:read"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "Show reactions made by this user ID. Defaults to the authed user."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "Show reactions made by this user name."
},
{
"Argument": "full",
"Required": "Optional",
"Description": "If true always return the complete reaction list."
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=100"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"reactions.remove": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reactions:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "Reaction (emoji) name."
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to remove reaction from."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to remove reaction from."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel ID where the message to remove reaction from was posted."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name where the message to remove reaction from was posted."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to remove reaction from."
},
],
"reminders.add": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reminders:write"
},
{
"Argument": "text",
"Required": "Required",
"Description": "The content of the reminder"
},
{
"Argument": "time",
"Required": "Required",
"Description": 'When this reminder should happen: the Unix timestamp (up to five years from now), the number of seconds until the reminder (if within 24 hours), or a natural language description (Ex. "in 15 minutes," or "every Thursday")'
},
{
"Argument": "user",
"Required": "Optional",
"Description": "The user ID who will receive the reminder. If no user is specified, the reminder will go to user who created it."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "The user name who will receive the reminder."
},
],
"reminders.complete": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reminders:write"
},
{
"Argument": "reminder",
"Required": "Required",
"Description": "The ID of the reminder to be marked as complete"
},
],
"reminders.delete": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reminders:write"
},
{
"Argument": "reminder",
"Required": "Required",
"Description": "The ID of the reminder"
},
],
"reminders.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reminders:read"
},
{
"Argument": "reminder",
"Required": "Required",
"Description": "The ID of the reminder"
},
],
"reminders.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: reminders:read"
},
],
"search.all": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: search:read"
},
{
"Argument": "query",
"Required": "Required",
"Description": "Search query. May contain booleans, etc."
},
{
"Argument": "sort",
"Required": "Optional",
"Description": "Return matches sorted by either score or timestamp. default=score"
},
{
"Argument": "sort_dir",
"Required": "Optional",
"Description": "Change sort direction to ascending (asc) or descending (desc). default=desc"
},
{
"Argument": "highlight",
"Required": "Optional",
"Description": "Pass a value of true to enable query highlight markers"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=20"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"search.files": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: search:read"
},
{
"Argument": "query",
"Required": "Required",
"Description": "Search query. May contain booleans, etc."
},
{
"Argument": "sort",
"Required": "Optional",
"Description": "Return matches sorted by either score or timestamp. default=score"
},
{
"Argument": "sort_dir",
"Required": "Optional",
"Description": "Change sort direction to ascending (asc) or descending (desc). default=desc"
},
{
"Argument": "highlight",
"Required": "Optional",
"Description": "Pass a value of true to enable query highlight markers"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=20"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"search.messages": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: search:read"
},
{
"Argument": "query",
"Required": "Required",
"Description": "Search query. May contain booleans, etc."
},
{
"Argument": "sort",
"Required": "Optional",
"Description": "Return matches sorted by either score or timestamp. default=score"
},
{
"Argument": "sort_dir",
"Required": "Optional",
"Description": "Change sort direction to ascending (asc) or descending (desc). default=desc"
},
{
"Argument": "highlight",
"Required": "Optional",
"Description": "Pass a value of true to enable query highlight markers"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=20"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"stars.add": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: stars:write"
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to add star to."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to add star to."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel ID to add star to, or channel where the message to add star to was posted (used with timestamp)"
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to add star to, or channel where the message to add star to was posted (used with timestamp)"
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to add star to."
},
],
"stars.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: stars:read"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=100"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
],
"stars.remove": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: stars:write"
},
{
"Argument": "file",
"Required": "Optional",
"Description": "File to remove star from."
},
{
"Argument": "file_comment",
"Required": "Optional",
"Description": "File comment to remove star from."
},
{
"Argument": "channel",
"Required": "Optional",
"Description": "Channel ID to remove star from, or channel where the message to remove star from was posted (used with timestamp)."
},
{
"extra_function": "channel_name_to_id",
"Argument": "ex-channel",
"Required": "Optional",
"Description": "Channel name to remove star from, or channel where the message to remove star from was posted (used with timestamp)."
},
{
"Argument": "timestamp",
"Required": "Optional",
"Description": "Timestamp of the message to remove star from."
},
],
"team.accessLogs": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: admin"
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page. default=100"
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return. default=1"
},
{
"Argument": "before",
"Required": "Optional",
"Description": "End of time range of logs to include in results (inclusive). default=now"
},
],
"team.billableInfo": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: admin"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "A user ID to retrieve the billable information for. Defaults to all users."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "A user name to retrieve the billable information for."
},
],
"team.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: team:read"
},
],
"team.integrationLogs": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: admin"
},
{
"Argument": "service_id",
"Required": "Optional",
"Description": "Filter logs to this service. Defaults to all logs."
},
{
"Argument": "app_id",
"Required": "Optional",
"Description": "Filter logs to this Slack app. Defaults to all logs."
},
{
"Argument": "user",
"Required": "Optional",
"Description": "Filter logs generated by this user's (ID) actions. Defaults to all logs."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "Filter logs generated by this user's (name) actions. Defaults to all logs."
},
{
"Argument": "change_type",
"Required": "Optional",
"Description": "Filter logs with this change type. Defaults to all logs."
},
{
"Argument": "count",
"Required": "Optional",
"Description": "Number of items to return per page."
},
{
"Argument": "page",
"Required": "Optional",
"Description": "Page number of results to return."
},
],
"team.profile.get": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users.profile:read"
},
{
"Argument": "visibility",
"Required": "Optional",
"Description": "Filter by visibility."
},
],
"usergroups.create": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:write"
},
{
"Argument": "name",
"Required": "Required",
"Description": "A name for the User Group. Must be unique among User Groups."
},
{
"Argument": "handle",
"Required": "Optional",
"Description": "A mention handle. Must be unique among channels, users and User Groups."
},
{
"Argument": "description",
"Required": "Optional",
"Description": "A short description of the User Group."
},
{
"Argument": "channels",
"Required": "Optional",
"Description": "A comma separated string of encoded channel IDs for which the User Group uses as a default."
},
{
"extra_function": "channel_names_to_ids",
"Argument": "ex-channels",
"Required": "Optional",
"Description": "A comma separated string of channel names for which the User Group uses as a default."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in each User Group."
},
],
"usergroups.disable": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:write"
},
{
"Argument": "usergroup",
"Required": "Required",
"Description": "The encoded ID of the User Group to disable."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in the User Group."
},
],
"usergroups.enable": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:write"
},
{
"Argument": "usergroup",
"Required": "Required",
"Description": "The encoded ID of the User Group to disable."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in the User Group."
},
],
"usergroups.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:read"
},
{
"Argument": "include_disabled",
"Required": "Optional",
"Description": "Include disabled User Groups."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in the User Group."
},
{
"Argument": "include_users",
"Required": "Optional",
"Description": "Include the list of users for each User Group."
},
],
"usergroups.update": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:write"
},
{
"Argument": "usergroup",
"Required": "Required",
"Description": "The encoded ID of the User Group to update."
},
{
"Argument": "name",
"Required": "Optional",
"Description": "A name for the User Group. Must be unique among User Groups."
},
{
"Argument": "handle",
"Required": "Optional",
"Description": "A mention handle. Must be unique among channels, users and User Groups."
},
{
"Argument": "description",
"Required": "Optional",
"Description": "A short description of the User Group."
},
{
"Argument": "channels",
"Required": "Optional",
"Description": "A comma separated string of encoded channel IDs for which the User Group uses as a default."
},
{
"extra_function": "channel_names_to_ids",
"Argument": "ex-channels",
"Required": "Optional",
"Description": "A comma separated string of channel names for which the User Group uses as a default."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in the User Group."
},
],
"usergroups.users.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:read"
},
{
"Argument": "usergroup",
"Required": "Required",
"Description": "The encoded ID of the User Group to update."
},
{
"Argument": "include_disabled",
"Required": "Optional",
"Description": "Allow results that involve disabled User Groups."
},
],
"usergroups.users.update": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: usergroups:write"
},
{
"Argument": "usergroup",
"Required": "Required",
"Description": "The encoded ID of the User Group to update."
},
{
"Argument": "users",
# "Required": "Required",
"Required": "Optional",
"Description": "A comma separated string of encoded user IDs that represent the entire list of users for the User Group."
},
{
"extra_function": "user_names_to_ids",
"Argument": "ex-users",
"Required": "Optional",
"Description": "A comma separated string of user names that represent the entire list of users for the User Group."
},
{
"Argument": "include_count",
"Required": "Optional",
"Description": "Include the number of users in the User Group."
},
],
"users.deletePhoto": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users.profile:write"
},
],
"users.getPresence": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users:read"
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to get presence info on. Defaults to the authed user."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to get presence info on. Defaults to the authed user."
},
],
"users.identity": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: identity.basic"
},
],
"users.info": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users:read"
},
{
"Argument": "user",
# "Required": "Required",
"Required": "Optional",
"Description": "User ID to get info on"
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to get info on"
},
],
"users.list": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users:read"
},
{
"Argument": "presence",
"Required": "Optional",
"Description": "Whether to include presence data in the output"
},
],
"users.setActive": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users:write"
},
],
"users.setPhoto": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users.profile:write"
},
{
"Argument": "image",
"Required": "Required",
"Description": "File contents via multipart/form-data."
},
{
"Argument": "crop_x",
"Required": "Optional",
"Description": "X coordinate of top-left corner of crop box"
},
{
"Argument": "crop_y",
"Required": "Optional",
"Description": "Y coordinate of top-left corner of crop box"
},
{
"Argument": "crop_w",
"Required": "Optional",
"Description": "Width/height of crop box (always square)"
},
],
"users.setPresence": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users:write"
},
{
"Argument": "presence",
"Required": "Required",
"Description": "Either auto or away"
},
],
"users.profile.get": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users.profile:read"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "User ID to retrieve profile info for"
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "User name to retrieve profile info for"
},
{
"Argument": "include_labels",
"Required": "Optional",
"Description": "Include labels for each ID in custom profile fields. default=false"
},
],
"users.profile.set": [
{
"Argument": "token",
"Required": "Required",
"Description": "Authentication token. Requires scope: users.profile:write"
},
{
"Argument": "user",
"Required": "Optional",
"Description": "ID of user to change. This argument may only be specified by team admins on paid teams."
},
{
"extra_function": "user_name_to_id",
"Argument": "ex-user",
"Required": "Optional",
"Description": "name of user to change. This argument may only be specified by team admins on paid teams."
},
{
"Argument": "profile",
"Required": "Optional",
"Description": "Collection of key:value pairs presented as a URL-encoded JSON hash."
},
{
"Argument": "name",
"Required": "Optional",
"Description": "Name of a single key to set. Usable only if profile is not passed."
},
{
"Argument": "value",
"Required": "Optional",
"Description": "Value to set a single key to. Usable only if profile is not passed."
},
],
}
| 37.067621
| 330
| 0.434305
| 6,670
| 93,188
| 6.017241
| 0.068516
| 0.106441
| 0.179619
| 0.076592
| 0.878759
| 0.855587
| 0.838395
| 0.821901
| 0.805606
| 0.779569
| 0
| 0.001572
| 0.447182
| 93,188
| 2,513
| 331
| 37.082372
| 0.777506
| 0.017352
| 0
| 0.489897
| 0
| 0.009485
| 0.474204
| 0.001694
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000412
| true
| 0.004536
| 0
| 0.000412
| 0.000825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b158e40cced29033e11197f7a23c2b3f08d8955
| 420
|
py
|
Python
|
Platforms/Web/Processing/Api/Discord/Configs/Whitelistedlinks/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 2
|
2017-09-14T08:07:55.000Z
|
2021-05-18T05:05:05.000Z
|
Platforms/Web/Processing/Api/Discord/Configs/Whitelistedlinks/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 111
|
2018-04-15T14:32:14.000Z
|
2021-03-28T21:06:29.000Z
|
Platforms/Web/Processing/Api/Discord/Configs/Whitelistedlinks/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 1
|
2018-04-15T13:24:44.000Z
|
2018-04-15T13:24:44.000Z
|
import Platforms.Web.Processing.Api.Discord.Configs.Whitelistedlinks.create as create
import Platforms.Web.Processing.Api.Discord.Configs.Whitelistedlinks.delete as delete
import Platforms.Web.Processing.Api.Discord.Configs.Whitelistedlinks.errors as errors
import Platforms.Web.Processing.Api.Discord.Configs.Whitelistedlinks.get as get
import Platforms.Web.Processing.Api.Discord.Configs.Whitelistedlinks.main as main
| 70
| 85
| 0.869048
| 55
| 420
| 6.636364
| 0.254545
| 0.205479
| 0.246575
| 0.383562
| 0.835616
| 0.835616
| 0.835616
| 0.835616
| 0
| 0
| 0
| 0
| 0.047619
| 420
| 5
| 86
| 84
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
6b19304816bda17461d2876c28ba4a8106b37a44
| 7,389
|
gyp
|
Python
|
cloud_print/virtual_driver/win/install/virtual_driver_install.gyp
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8
|
2016-02-08T11:59:31.000Z
|
2020-05-31T15:19:54.000Z
|
cloud_print/virtual_driver/win/install/virtual_driver_install.gyp
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2021-05-05T11:11:31.000Z
|
2021-05-05T11:11:31.000Z
|
cloud_print/virtual_driver/win/install/virtual_driver_install.gyp
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 7
|
2016-02-09T09:28:14.000Z
|
2020-07-25T19:03:36.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'target_defaults': {
'variables': {
'chromium_code': 1,
},
'include_dirs': [
'<(DEPTH)',
],
},
'targets' : [
{
'target_name': 'virtual_driver_setup',
'type': 'executable',
'include_dirs': [
# To allow including "version.h"
'<(SHARED_INTERMEDIATE_DIR)',
],
'dependencies': [
'../virtual_driver.gyp:virtual_driver_lib',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/cloud_print/common/common.gyp:cloud_print_install_lib',
'<(DEPTH)/cloud_print/cloud_print_resources.gyp:cloud_print_version_resources',
'virtual_driver_setup_resources',
],
'sources': [
'setup.cc',
'<(SHARED_INTERMEDIATE_DIR)/cloud_print/virtual_driver_setup_exe_version.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ar.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_bg.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_bn.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ca.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_cs.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_da.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_de.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_el.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_en.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_en-GB.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_es.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_es-419.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_et.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_fa.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_fi.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_fil.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_fr.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_gu.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_he.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_hi.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_hr.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_hu.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_id.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_it.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ja.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_kn.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ko.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_lt.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_lv.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ml.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_mr.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ms.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_nb.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_nl.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_pl.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_pt-BR.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_pt-PT.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ro.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ru.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_sk.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_sl.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_sr.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_sv.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_ta.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_te.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_th.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_tr.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_uk.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_vi.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_zh-CN.rc',
'<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources/virtual_driver_setup_resources_zh-TW.rc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
'AdditionalDependencies': [
'setupapi.lib',
],
'DelayLoadDLLs': [
'winspool.drv',
],
},
},
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'../../gcp_driver.gpd',
],
},
],
},
{
'target_name': 'virtual_driver_setup_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/virtual_driver_setup_resources',
},
'actions': [
{
'action_name': 'virtual_driver_setup_resources',
'variables': {
'grit_grd_file': 'virtual_driver_setup_resources.grd',
},
'includes': [ '../../../../build/grit_action.gypi' ],
},
],
'includes': [ '../../../../build/grit_target.gypi' ],
},
],
}
| 60.073171
| 109
| 0.749086
| 843
| 7,389
| 5.953737
| 0.16726
| 0.287507
| 0.390915
| 0.575613
| 0.821478
| 0.795178
| 0.795178
| 0.785615
| 0.785615
| 0.785615
| 0
| 0.001424
| 0.144539
| 7,389
| 122
| 110
| 60.565574
| 0.792596
| 0.028827
| 0
| 0.128205
| 0
| 0
| 0.803626
| 0.751743
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.025641
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
866c92352c243e46ca4530671fe001fcc7758fa2
| 58,737
|
py
|
Python
|
utils.py
|
coasxu/FedMA
|
21f4d32338fd2563ebd97c737e3b9f4f470029d9
|
[
"MIT"
] | 254
|
2020-02-14T07:45:36.000Z
|
2022-03-30T01:36:07.000Z
|
utils.py
|
coasxu/FedMA
|
21f4d32338fd2563ebd97c737e3b9f4f470029d9
|
[
"MIT"
] | 14
|
2020-05-01T18:21:06.000Z
|
2022-02-21T03:50:52.000Z
|
utils.py
|
coasxu/FedMA
|
21f4d32338fd2563ebd97c737e3b9f4f470029d9
|
[
"MIT"
] | 72
|
2020-02-20T12:16:25.000Z
|
2022-02-19T09:59:59.000Z
|
import os
import argparse
import json
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import logging
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
from itertools import product
import math
import copy
import time
from sklearn.metrics import confusion_matrix
# we've changed to a faster solver
#from scipy.optimize import linear_sum_assignment
import logging
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.init as init
from datasets import MNIST_truncated, CIFAR10_truncated, ImageFolderTruncated, CIFAR10ColorGrayScaleTruncated
from combine_nets import prepare_uniform_weights, prepare_sanity_weights, prepare_weight_matrix, normalize_weights, get_weighted_average_pred
from vgg import *
from model import *
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def mkdirs(dirpath):
try:
os.makedirs(dirpath)
except Exception as _:
pass
def parse_class_dist(net_class_config):
cls_net_map = {}
for net_idx, net_classes in enumerate(net_class_config):
for net_cls in net_classes:
if net_cls not in cls_net_map:
cls_net_map[net_cls] = []
cls_net_map[net_cls].append(net_idx)
return cls_net_map
def record_net_data_stats(y_train, net_dataidx_map, logdir):
net_cls_counts = {}
for net_i, dataidx in net_dataidx_map.items():
unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_cls_counts[net_i] = tmp
logging.debug('Data statistics: %s' % str(net_cls_counts))
return net_cls_counts
def partition_data(dataset, datadir, logdir, partition, n_nets, alpha, args):
if dataset == 'mnist':
X_train, y_train, X_test, y_test = load_mnist_data(datadir)
n_train = X_train.shape[0]
elif dataset == 'cifar10':
X_train, y_train, X_test, y_test = load_cifar10_data(datadir)
n_train = X_train.shape[0]
elif dataset == 'cinic10':
_train_dir = './data/cinic10/cinic-10-trainlarge/train'
cinic_mean = [0.47889522, 0.47227842, 0.43047404]
cinic_std = [0.24205776, 0.23828046, 0.25874835]
trainset = ImageFolderTruncated(_train_dir, transform=transforms.Compose([transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(Variable(x.unsqueeze(0),
requires_grad=False),
(4,4,4,4),mode='reflect').data.squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=cinic_mean,std=cinic_std),
]))
y_train = trainset.get_train_labels
n_train = y_train.shape[0]
if partition == "homo":
idxs = np.random.permutation(n_train)
batch_idxs = np.array_split(idxs, n_nets)
net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
elif partition == "hetero-dir":
min_size = 0
K = 10
N = y_train.shape[0]
net_dataidx_map = {}
while min_size < 10:
idx_batch = [[] for _ in range(n_nets)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(y_train == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
## Balance
proportions = np.array([p*(len(idx_j)<N/n_nets) for p,idx_j in zip(proportions,idx_batch)])
proportions = proportions/proportions.sum()
proportions = (np.cumsum(proportions)*len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j,idx in zip(idx_batch,np.split(idx_k,proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(n_nets):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
elif partition == "hetero-fbs":
# in this part we conduct a experimental study on exploring the effect of increasing the number of batches
# but the number of data points are approximately fixed for each batch
# the arguments we need to use here are: `args.partition_step_size`, `args.local_points`, `args.partition_step`(step can be {0, 1, ..., args.partition_step_size - 1}).
# Note that `args.partition` need to be fixed as "hetero-fbs" where fbs means fixed batch size
net_dataidx_map = {}
# stage 1st: homo partition
idxs = np.random.permutation(n_train)
total_num_batches = int(n_train/args.local_points) # e.g. currently we have 180k, we want each local batch has 5k data points the `total_num_batches` becomes 36
step_batch_idxs = np.array_split(idxs, args.partition_step_size)
sub_partition_size = int(total_num_batches / args.partition_step_size) # e.g. for `total_num_batches` at 36 and `args.partition_step_size` at 6, we have `sub_partition_size` at 6
# stage 2nd: hetero partition
n_batches = (args.partition_step + 1) * sub_partition_size
min_size = 0
K = 10
#N = len(step_batch_idxs[args.step])
baseline_indices = np.concatenate([step_batch_idxs[i] for i in range(args.partition_step + 1)])
y_train = y_train[baseline_indices]
N = y_train.shape[0]
while min_size < 10:
idx_batch = [[] for _ in range(n_batches)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(y_train == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_batches))
## Balance
proportions = np.array([p*(len(idx_j)<N/n_batches) for p,idx_j in zip(proportions,idx_batch)])
proportions = proportions/proportions.sum()
proportions = (np.cumsum(proportions)*len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j,idx in zip(idx_batch,np.split(idx_k,proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
# we leave this to the end
for j in range(n_batches):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
return y_train, net_dataidx_map, traindata_cls_counts, baseline_indices
traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
#return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
return y_train, net_dataidx_map, traindata_cls_counts
def partition_data_dist_skew(dataset, datadir, logdir, partition, n_nets, alpha, args):
if dataset == 'mnist':
pass
elif dataset == 'cifar10':
normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(
Variable(x.unsqueeze(0), requires_grad=False),
(4,4,4,4),mode='reflect').data.squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# data prep for test set
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize])
# load training and test set here:
training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
download=True, transform=None)
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
download=True, transform=None)
y_train = np.array(copy.deepcopy(training_set.targets))
n_train = training_set.data.shape[0]
entire_gray_scale_indices_train = []
entire_gray_scale_indices_test = []
# we start an adjust version here:
#########################################################################################################################################
# in this setting, we corelate the majority / minory with the class
# i.e. we firstly do an extreme version where we randomly sample 5 out of 10 groups s.t. in those groups there are only grayscale images
# for the other five groups, we leave all images to be colored images
#########################################################################################################################################
grayscale_dominate_classes = np.random.choice(np.arange(10), 5, replace=False)
logger.info("Grayscale image dominated classes are : {}".format(grayscale_dominate_classes))
# we split all grayscale dominate classes to client 0 and all color dominate classes to client1
client0_indices = []
client1_indices = []
for i in range(10):
if i in grayscale_dominate_classes:
logger.info("Grayscale dominate class index: {}".format(i))
class_indices_train = np.where(np.array(training_set.targets) == i)[0]
# we fix this to be one first
###
# this is the extreme case, we now change to a relatexed case
###
#num_of_gray_scale_per_class_train = int(1.0 * class_indices_train.shape[0])
num_of_gray_scale_per_class_train = int(0.95 * class_indices_train.shape[0])
class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
client0_indices.append(class_indices_train)
else:
logger.info("Color dominate class index: {}".format(i))
class_indices_train = np.where(np.array(training_set.targets) == i)[0]
num_of_gray_scale_per_class_train = int(0.05 * class_indices_train.shape[0])
class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
client1_indices.append(class_indices_train)
entire_gray_scale_indices_train.append(class_gray_scale_indices_train)
entire_gray_scale_indices_train = np.concatenate(entire_gray_scale_indices_train)
client0_indices = np.concatenate(client0_indices)
client1_indices = np.concatenate(client1_indices)
###
# extreme case:
###
for i in range(10):
class_indices_test = np.where(np.array(testset.targets) == i)[0]
# training set contains skewness, but in test set colored and gray-scale images are evenly distributed
num_of_gray_scale_per_class_test = int(0.5 * class_indices_test.shape[0])
class_gray_scale_indices_test = np.random.choice(class_indices_test, num_of_gray_scale_per_class_test, replace=False)
entire_gray_scale_indices_test.append(class_gray_scale_indices_test)
logger.info("Num of gray scale image per class test: {}".format(class_gray_scale_indices_test.shape[0]))
entire_gray_scale_indices_test = np.concatenate(entire_gray_scale_indices_test)
logger.info("Total Num of gray scale image test: {}".format(entire_gray_scale_indices_test.shape[0]))
elif dataset == 'cinic10':
pass
if partition == "homo":
net_dataidx_map = {}
idxs = np.arange(n_train)
indices_colored = np.array([i for i in idxs if i not in entire_gray_scale_indices_train])
# we split grayscale and colored images on two workers entirely
net_dataidx_map[0] = client0_indices
net_dataidx_map[1] = client1_indices
elif partition == "hetero-dir":
pass
elif partition == "hetero-fbs":
pass
traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def partition_data_dist_skew_baseline(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Baseline partition for the ICLR2020 rebuttal experiments where the entire
    training dataset AND the entire test dataset are treated as grayscale images.

    Only dataset == 'cifar10' and partition == 'homo' are implemented; other
    combinations fall through (and will fail on undefined locals, as before).

    Returns:
        y_train: np.ndarray of training labels.
        net_dataidx_map: dict mapping client index -> np.ndarray of training indices.
        traindata_cls_counts: per-client class histogram from record_net_data_stats.
        entire_gray_scale_indices_train / entire_gray_scale_indices_test:
            indices rendered as grayscale (here: every index of each split).
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        # The datasets are loaded only for labels and sizes; the actual train/test
        # transforms are applied later by the data loader, hence transform=None.
        # (The original built unused transform pipelines and an unused O(n*m)
        # "indices_colored" membership scan — both removed, no behavior change.)
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        n_test = testset.data.shape[0]
        # baseline: every single image, train and test, is grayscale
        entire_gray_scale_indices_train = np.arange(n_train)
        entire_gray_scale_indices_test = np.arange(n_test)
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # a single client (index 0) holds the whole training set
        net_dataidx_map = {0: np.arange(n_train)}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def partition_data_dist_skew_normal(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Baseline partition for the ICLR2020 rebuttal experiments where the training
    set stays entirely COLORED (no grayscale training indices) while the entire
    test set is grayscale.  (The previous docstring incorrectly claimed both
    splits were grayscale.)

    Only dataset == 'cifar10' and partition == 'homo' are implemented.

    Returns:
        y_train: np.ndarray of training labels.
        net_dataidx_map: dict mapping client index -> np.ndarray of training indices.
        traindata_cls_counts: per-client class histogram from record_net_data_stats.
        entire_gray_scale_indices_train: empty list (no grayscale training images).
        entire_gray_scale_indices_test: np.ndarray covering every test index.
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        # Loaded only for labels and sizes; transforms are applied later by the
        # data loader.  Dead transform pipelines and an unused "indices_colored"
        # scan from the original were removed — no behavior change.
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        n_test = testset.data.shape[0]
        # train split stays fully colored; the whole test split is grayscale
        entire_gray_scale_indices_train = []
        entire_gray_scale_indices_test = np.arange(n_test)
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # a single client (index 0) holds the whole training set
        net_dataidx_map = {0: np.arange(n_train)}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def partition_data_dist_skew_baseline_balanced(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Balanced baseline: within EVERY CIFAR-10 class, exactly half of the images
    are marked grayscale, in both the training and the test split, so colored
    and grayscale images are perfectly balanced per class.

    Only dataset == 'cifar10' and partition == 'homo' are implemented.

    Returns:
        y_train, net_dataidx_map, traindata_cls_counts,
        entire_gray_scale_indices_train, entire_gray_scale_indices_test
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        # Loaded only for labels and sizes; transforms are applied later by the
        # data loader (the original built unused transform pipelines).
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        entire_gray_scale_indices_train = []
        entire_gray_scale_indices_test = []
        # The "dominate classes" draw is kept so the RNG stream matches the
        # original implementation and the same log lines are emitted, but both
        # branches sampled the identical 50% fraction, so the sampling code
        # below is shared instead of duplicated.
        grayscale_dominate_classes = np.random.choice(np.arange(10), 5, replace=False)
        logger.info("Grayscale image dominated classes are : {}".format(grayscale_dominate_classes))
        for i in range(10):
            if i in grayscale_dominate_classes:
                logger.info("Grayscale dominate class index: {}".format(i))
            else:
                logger.info("Color dominate class index: {}".format(i))
            class_indices_train = np.where(np.array(training_set.targets) == i)[0]
            num_of_gray_scale_per_class_train = int(0.5 * class_indices_train.shape[0])
            class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
            entire_gray_scale_indices_train.append(class_gray_scale_indices_train)
        entire_gray_scale_indices_train = np.concatenate(entire_gray_scale_indices_train)
        # test split: also 50/50 grayscale per class
        for i in range(10):
            class_indices_test = np.where(np.array(testset.targets) == i)[0]
            num_of_gray_scale_per_class_test = int(0.5 * class_indices_test.shape[0])
            class_gray_scale_indices_test = np.random.choice(class_indices_test, num_of_gray_scale_per_class_test, replace=False)
            entire_gray_scale_indices_test.append(class_gray_scale_indices_test)
            logger.info("Num of gray scale image per class test: {}".format(class_gray_scale_indices_test.shape[0]))
        entire_gray_scale_indices_test = np.concatenate(entire_gray_scale_indices_test)
        logger.info("Total Num of gray scale image test: {}".format(entire_gray_scale_indices_test.shape[0]))
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # a single client (index 0) holds the whole training set
        net_dataidx_map = {0: np.arange(n_train)}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def partition_data_dist_skew_baseline_oversampled(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Skewed partition with oversampling: 5 randomly chosen classes are 95%
    grayscale, the other 5 are 95% colored.  Within each class the 5% minority
    modality is oversampled (replicated) so that minority and majority end up
    balanced in the single client's index list.

    Only dataset == 'cifar10' and partition == 'homo' are implemented.

    Returns:
        y_train, net_dataidx_map, traindata_cls_counts,
        entire_gray_scale_indices_train, entire_gray_scale_indices_test
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        # Loaded only for labels and sizes; transforms are applied later by the
        # data loader (the original built unused transform pipelines).
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        entire_gray_scale_indices_train = []
        entire_gray_scale_indices_test = []
        grayscale_dominate_classes = np.random.choice(np.arange(10), 5, replace=False)
        logger.info("Grayscale image dominated classes are : {}".format(grayscale_dominate_classes))
        # NOTE: int(0.95 / 0.05) evaluates to 18, not 19 — the float division
        # yields 18.999... and int() truncates.  18 happens to be the correct
        # balancing factor anyway: the 5% minority appears once naturally plus
        # 18 replicas = 0.05 * 19 = 0.95, matching the majority fraction, so the
        # original truncated value is kept deliberately.
        oversample_factor = int(0.95 / 0.05)
        entire_indices = []
        for class_idx in range(10):
            class_indices_train = np.where(np.array(training_set.targets) == class_idx)[0]
            if class_idx in grayscale_dominate_classes:
                logger.info("Grayscale dominate class index: {}".format(class_idx))
                num_of_gray_scale_per_class_train = int(0.95 * class_indices_train.shape[0])
                class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
                entire_indices.append(class_indices_train)
                # oversample the under-represented colored 5% of this class;
                # set lookup replaces the original O(n^2) list-membership scan
                gray_lookup = set(class_gray_scale_indices_train.tolist())
                class_color_indices_train = [idx for idx in class_indices_train if idx not in gray_lookup]
                logger.info("Length gray scale image train: {}".format(len(class_gray_scale_indices_train)))
                logger.info("Length color image train: {}".format(len(class_color_indices_train)))
                # the inner loop variable no longer shadows the class loop index
                for _ in range(oversample_factor):
                    entire_indices.append(class_color_indices_train)
            else:
                logger.info("Color dominate class index: {}".format(class_idx))
                num_of_gray_scale_per_class_train = int(0.05 * class_indices_train.shape[0])
                class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
                logger.info("Length gray scale image train: {}".format(len(class_gray_scale_indices_train)))
                entire_indices.append(class_indices_train)
                # oversample the under-represented grayscale 5% of this class
                for _ in range(oversample_factor):
                    entire_indices.append(class_gray_scale_indices_train)
            entire_gray_scale_indices_train.append(class_gray_scale_indices_train)
        entire_gray_scale_indices_train = np.concatenate(entire_gray_scale_indices_train)
        entire_indices = np.concatenate(entire_indices)
        logger.info("Entire indices: {}".format(len(entire_indices)))
        # test split: colored and grayscale evenly distributed per class
        for i in range(10):
            class_indices_test = np.where(np.array(testset.targets) == i)[0]
            num_of_gray_scale_per_class_test = int(0.5 * class_indices_test.shape[0])
            class_gray_scale_indices_test = np.random.choice(class_indices_test, num_of_gray_scale_per_class_test, replace=False)
            entire_gray_scale_indices_test.append(class_gray_scale_indices_test)
            logger.info("Num of gray scale image per class test: {}".format(class_gray_scale_indices_test.shape[0]))
        entire_gray_scale_indices_test = np.concatenate(entire_gray_scale_indices_test)
        logger.info("Total Num of gray scale image test: {}".format(entire_gray_scale_indices_test.shape[0]))
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # a single client (index 0) holds the whole (oversampled) training set
        net_dataidx_map = {0: entire_indices}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def partition_data_viz(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Split CIFAR-10 across two clients for visualization: client 0 exclusively
    owns classes 0-3, client 1 exclusively owns classes 6-9, and the remaining
    classes (4 and 5) are shared 50/50 between the two clients.
    (A stale comment in the original claimed only ONE class was shared.)

    Only dataset == 'cifar10' and partition == 'homo' are implemented.

    Returns (y_train, net_dataidx_map, traindata_cls_counts).
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        # the test set is loaded only for its download side effect; it is not
        # used below
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        classes_client1 = [0, 1, 2, 3]
        classes_client2 = [6, 7, 8, 9]
        client0_indices = []
        client1_indices = []
        for ci in range(10):
            class_indices_train = np.where(np.array(training_set.targets) == ci)[0]
            if ci in classes_client1:
                logger.info("Client 1 exclusive classes: {}".format(ci))
                client0_indices.append(class_indices_train)
            elif ci in classes_client2:
                logger.info("Client 2 exclusive classes: {}".format(ci))
                client1_indices.append(class_indices_train)
            else:
                # shared class: half to each client.  Set lookup replaces the
                # original O(n^2) list-membership scan for the complement.
                num_of_dp_per_client = int(0.5 * class_indices_train.shape[0])
                shared_class_indices_client0 = np.random.choice(class_indices_train, num_of_dp_per_client, replace=False)
                client0_lookup = set(shared_class_indices_client0.tolist())
                shared_class_indices_client1 = [idx for idx in class_indices_train if idx not in client0_lookup]
                client0_indices.append(shared_class_indices_client0)
                client1_indices.append(shared_class_indices_client1)
                logger.info("shared_class_indices_client0: {}, length: {}, shared_class_indices_client1: {}, length: {}".format(
                    shared_class_indices_client0, len(shared_class_indices_client0), shared_class_indices_client1, len(shared_class_indices_client1)))
        client0_indices = np.concatenate(client0_indices)
        client1_indices = np.concatenate(client1_indices)
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # one client per half of the class split
        net_dataidx_map = {0: client0_indices, 1: client1_indices}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts
def partition_data_viz2(dataset, datadir, logdir, partition, n_nets, alpha, args):
    '''
    Visualization variant with correlated distribution skew: classes 0-5 are
    entirely grayscale and assigned to client 0; classes 6-9 are entirely
    colored and assigned to client 1.  In the test split every class is split
    50/50 between colored and grayscale.

    Only dataset == 'cifar10' and partition == 'homo' are implemented.

    Returns:
        y_train, net_dataidx_map, traindata_cls_counts,
        entire_gray_scale_indices_train, entire_gray_scale_indices_test
    '''
    if dataset == 'mnist':
        pass
    elif dataset == 'cifar10':
        # Loaded only for labels and sizes; transforms are applied later by the
        # data loader.  The original also computed a never-used "indices_colored"
        # list via ~50k x 30k ndarray membership tests — removed, no behavior
        # change.
        training_set = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
                                                    download=True, transform=None)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
                                               download=True, transform=None)
        y_train = np.array(copy.deepcopy(training_set.targets))
        n_train = training_set.data.shape[0]
        entire_gray_scale_indices_train = []
        entire_gray_scale_indices_test = []
        # deterministic split (no random class draw): classes 0-5 are grayscale
        grayscale_dominate_classes = np.arange(0, 6)
        logger.info("Grayscale image dominated classes are : {}".format(grayscale_dominate_classes))
        client0_indices = []
        client1_indices = []
        for i in range(10):
            class_indices_train = np.where(np.array(training_set.targets) == i)[0]
            if i in grayscale_dominate_classes:
                logger.info("Grayscale dominate class index: {}".format(i))
                # The whole class is grayscale.  np.random.choice is kept
                # (instead of using class_indices_train directly) so the RNG
                # stream stays identical to the original implementation for the
                # test-split sampling below.
                num_of_gray_scale_per_class_train = int(1.0 * class_indices_train.shape[0])
                class_gray_scale_indices_train = np.random.choice(class_indices_train, num_of_gray_scale_per_class_train, replace=False)
                client0_indices.append(class_indices_train)
                entire_gray_scale_indices_train.append(class_gray_scale_indices_train)
            else:
                logger.info("Color dominate class index: {}".format(i))
                # colored classes contribute no grayscale indices at all
                client1_indices.append(class_indices_train)
        entire_gray_scale_indices_train = np.concatenate(entire_gray_scale_indices_train)
        client0_indices = np.concatenate(client0_indices)
        client1_indices = np.concatenate(client1_indices)
        # test split: 50/50 colored/grayscale per class
        for i in range(10):
            class_indices_test = np.where(np.array(testset.targets) == i)[0]
            num_of_gray_scale_per_class_test = int(0.5 * class_indices_test.shape[0])
            class_gray_scale_indices_test = np.random.choice(class_indices_test, num_of_gray_scale_per_class_test, replace=False)
            entire_gray_scale_indices_test.append(class_gray_scale_indices_test)
            logger.info("Num of gray scale image per class test: {}".format(class_gray_scale_indices_test.shape[0]))
        entire_gray_scale_indices_test = np.concatenate(entire_gray_scale_indices_test)
        logger.info("Total Num of gray scale image test: {}".format(entire_gray_scale_indices_test.shape[0]))
    elif dataset == 'cinic10':
        pass
    if partition == "homo":
        # grayscale classes on client 0, colored classes on client 1
        net_dataidx_map = {0: client0_indices, 1: client1_indices}
    elif partition == "hetero-dir":
        pass
    elif partition == "hetero-fbs":
        pass
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return y_train, net_dataidx_map, traindata_cls_counts, entire_gray_scale_indices_train, entire_gray_scale_indices_test
def load_mnist_data(datadir):
    """Load the full MNIST splits and return them as numpy arrays.

    Returns a tuple (X_train, y_train, X_test, y_test).
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_ds = MNIST_truncated(datadir, train=True, download=True, transform=to_tensor)
    test_ds = MNIST_truncated(datadir, train=False, download=True, transform=to_tensor)
    # the truncated datasets hold tensors; convert them to numpy arrays
    X_train = train_ds.data.data.numpy()
    y_train = train_ds.target.data.numpy()
    X_test = test_ds.data.data.numpy()
    y_test = test_ds.target.data.numpy()
    return (X_train, y_train, X_test, y_test)
def load_cifar10_data(datadir):
    """Load the full CIFAR-10 splits.

    Returns a tuple (X_train, y_train, X_test, y_test) exactly as exposed by
    CIFAR10_truncated's `data` / `target` attributes.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=to_tensor)
    test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=to_tensor)
    return (train_ds.data, train_ds.target, test_ds.data, test_ds.target)
def compute_accuracy(model, dataloader, get_confusion_matrix=False, device="cpu"):
    """
    Evaluate classification accuracy of `model` over all batches of `dataloader`.

    Args:
        model: a torch.nn.Module producing class logits of shape (batch, classes).
        dataloader: iterable of (x, target) batches.
        get_confusion_matrix: if True, also return a confusion matrix over all
            predictions (computed with the module-level `confusion_matrix`).
        device: device to run inference on ("cpu" or a CUDA device string).

    Returns:
        accuracy (float) or, when get_confusion_matrix is True,
        (accuracy, conf_matrix).

    The model's training/eval mode is restored on exit.
    """
    was_training = False
    if model.training:
        model.eval()
        was_training = True
    # Accumulate per-batch chunks and concatenate once at the end; the original
    # used np.append per batch, which copies the whole array each time (O(n^2)).
    pred_chunks, true_chunks = [], []
    correct, total = 0, 0
    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(dataloader):
            x, target = x.to(device), target.to(device)
            out = model(x)
            _, pred_label = torch.max(out.data, 1)
            total += x.data.size()[0]
            correct += (pred_label == target.data).sum().item()
            # .cpu() is a no-op for tensors already on the CPU, so one code path
            # covers both devices (the original branched on `device`).
            pred_chunks.append(pred_label.cpu().numpy())
            true_chunks.append(target.data.cpu().numpy())
    pred_labels_list = np.concatenate(pred_chunks) if pred_chunks else np.array([])
    true_labels_list = np.concatenate(true_chunks) if true_chunks else np.array([])
    if was_training:
        model.train()
    if get_confusion_matrix:
        conf_matrix = confusion_matrix(true_labels_list, pred_labels_list)
        return correct / float(total), conf_matrix
    return correct / float(total)
def init_cnns(net_configs, n_nets):
    '''
    Initialize the local CNNs.
    Please note that this part is hard coded right now.

    Returns (cnns, model_meta_data, layer_type) where `cnns` maps net index to
    a SimpleCNN instance and the other two describe the state_dict layout of
    net 0 (all nets share the same architecture).
    '''
    input_size = (16 * 5 * 5)  # hard coded: flattened feature size of SimpleCNN
    output_size = net_configs[-1]
    hidden_sizes = [120, 84]
    cnns = {}
    for net_idx in range(n_nets):
        cnns[net_idx] = SimpleCNN(input_size, hidden_sizes, output_size)
    # book-keeping: record the shape and name of every weight tensor
    model_meta_data = []
    layer_type = []
    for layer_name, weights in cnns[0].state_dict().items():
        model_meta_data.append(weights.shape)
        layer_type.append(layer_name)
    return cnns, model_meta_data, layer_type
def init_models(net_configs, n_nets, args):
    '''
    Initialize the local models according to args.model / args.dataset.
    Please note that this part is hard coded right now.

    Returns (cnns, model_meta_data, layer_type) where `cnns` maps net index to
    a freshly constructed model and the other two describe the state_dict
    layout of net 0.

    Raises:
        ValueError: for an unsupported model/dataset combination (previously
            this surfaced as an opaque UnboundLocalError on `cnn`).
    '''
    cnns = {net_i: None for net_i in range(n_nets)}
    # book keeping to store meta data of model weights
    model_meta_data = []
    layer_type = []
    for cnn_i in range(n_nets):
        if args.model == "lenet":
            cnn = LeNet()
        elif args.model == "vgg":
            cnn = vgg11()
        elif args.model == "simple-cnn":
            if args.dataset in ("cifar10", "cinic10"):
                cnn = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=10)
            elif args.dataset == "mnist":
                cnn = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)
            else:
                raise ValueError("Unsupported dataset for simple-cnn: {}".format(args.dataset))
        elif args.model == "moderate-cnn":
            if args.dataset == "mnist":
                cnn = ModerateCNNMNIST()
            elif args.dataset in ("cifar10", "cinic10"):
                cnn = ModerateCNN()
            else:
                raise ValueError("Unsupported dataset for moderate-cnn: {}".format(args.dataset))
        else:
            raise ValueError("Unsupported model: {}".format(args.model))
        cnns[cnn_i] = cnn
    for (k, v) in cnns[0].state_dict().items():
        model_meta_data.append(v.shape)
        layer_type.append(k)
    return cnns, model_meta_data, layer_type
def save_model(model, model_index):
    """Persist `model`'s state_dict to the file 'trained_local_model<model_index>'."""
    logger.info("saving local model-{}".format(model_index))
    checkpoint_path = "trained_local_model" + str(model_index)
    with open(checkpoint_path, "wb") as f_:
        torch.save(model.state_dict(), f_)
    return
def load_model(model, model_index, rank=0, device="cpu"):
    """Load the checkpoint 'trained_local_model<model_index>' into `model`,
    move it to `device`, and return it.

    `rank` is accepted for call-site compatibility but is unused here.
    """
    checkpoint_path = "trained_local_model" + str(model_index)
    with open(checkpoint_path, "rb") as f_:
        state = torch.load(f_)
    model.load_state_dict(state)
    model.to(device)
    return model
def save_model_dist_skew(model, model_index):
    """Persist `model`'s state_dict under 'trained_local_model_dist_skew<model_index>'."""
    logger.info("saving local model-{} dist skew".format(model_index))
    checkpoint_path = "trained_local_model_dist_skew" + str(model_index)
    with open(checkpoint_path, "wb") as f_:
        torch.save(model.state_dict(), f_)
    return
def load_model_dist_skew(model, model_index, device="cpu"):
    """Load a dist-skew checkpoint into `model`, move it to `device`, and return it."""
    logger.info("loading local model-{} dist skew".format(model_index))
    checkpoint_path = "trained_local_model_dist_skew" + str(model_index)
    with open(checkpoint_path, "rb") as f_:
        state = torch.load(f_)
    model.load_state_dict(state)
    model.to(device)
    return model
def save_model_viz(model, model_index):
    """Persist a visualization model under 'trained_local_model_viz<idx>_new'.

    NOTE(review): the '_new' suffix here does NOT match load_model_viz, which
    reads 'trained_local_model_viz<idx>' without the suffix — confirm whether
    the loader is meant to pick up these files.
    """
    logger.info("saving local model-{} visulization".format(model_index))
    checkpoint_path = "trained_local_model_viz{}_new".format(model_index)
    with open(checkpoint_path, "wb") as f_:
        torch.save(model.state_dict(), f_)
    return
def load_model_viz(model, model_index, device="cpu"):
    """Load a visualization checkpoint into `model`, move it to `device`, return it.

    NOTE(review): reads 'trained_local_model_viz<idx>' while save_model_viz
    writes 'trained_local_model_viz<idx>_new' — the pair cannot round-trip as
    written; confirm which filename is intended.
    """
    logger.info("loading local model-{} visulization".format(model_index))
    checkpoint_path = "trained_local_model_viz" + str(model_index)
    with open(checkpoint_path, "rb") as f_:
        state = torch.load(f_)
    model.load_state_dict(state)
    model.to(device)
    return model
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None):
    """Build train/test DataLoaders for 'mnist', 'cifar10' or 'cinic10'.

    Args:
        dataset: dataset name; anything else returns with loaders undefined.
        datadir: root directory passed to the truncated MNIST/CIFAR datasets
            (ignored by the cinic10 branch, which uses a hard-coded path).
        train_bs / test_bs: batch sizes for the train and test loaders.
        dataidxs: optional index subset restricting the TRAINING split only;
            the test split is always the full set.

    Returns:
        (train_dl, test_dl)
    """
    if dataset in ('mnist', 'cifar10'):
        if dataset == 'mnist':
            dl_obj = MNIST_truncated
            # standard MNIST mean/std normalization, identical for both splits
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))])
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))])
        elif dataset == 'cifar10':
            dl_obj = CIFAR10_truncated
            # per-channel CIFAR-10 statistics, rescaled from [0, 255] to [0, 1]
            normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                             std=[x/255.0 for x in [63.0, 62.1, 66.7]])
            # training augmentation: reflect-pad 4 px on every side (via F.pad
            # on a temporarily batched tensor), then random 32x32 crop and
            # random horizontal flip
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(
                    Variable(x.unsqueeze(0), requires_grad=False),
                    (4,4,4,4),mode='reflect').data.squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
                ])
            # data prep for test set: no augmentation, normalize only
            transform_test = transforms.Compose([transforms.ToTensor(),normalize])
        train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
        test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)
        train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True)
        test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False)
    elif dataset == 'cinic10':
        # statistic for normalizing the dataset
        cinic_mean = [0.47889522, 0.47227842, 0.43047404]
        cinic_std = [0.24205776, 0.23828046, 0.25874835]
        # NOTE(review): this branch ignores `datadir` and reads a fixed path
        cinic_directory = './data/cinic10'
        # same pad/crop/flip augmentation as the CIFAR-10 branch
        training_set = ImageFolderTruncated(cinic_directory + '/cinic-10-trainlarge/train',
                                            dataidxs=dataidxs,
                                            transform=transforms.Compose([transforms.ToTensor(),
                                                transforms.Lambda(lambda x: F.pad(Variable(x.unsqueeze(0), requires_grad=False),
                                                    (4,4,4,4),mode='reflect').data.squeeze()),
                                                transforms.ToPILImage(),
                                                transforms.RandomCrop(32),
                                                transforms.RandomHorizontalFlip(),
                                                transforms.ToTensor(),
                                                transforms.Normalize(mean=cinic_mean,std=cinic_std),
                                                ]))
        train_dl = torch.utils.data.DataLoader(training_set, batch_size=train_bs, shuffle=True)
        logger.info("Len of training set: {}, len of imgs in training set: {}, len of train dl: {}".format(len(training_set), len(training_set.imgs), len(train_dl)))
        # NOTE(review): evaluation data comes from .../cinic-10-trainlarge/test —
        # confirm this directory holds the intended test split
        test_dl = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder(cinic_directory + '/cinic-10-trainlarge/test',
                                              transform=transforms.Compose([transforms.ToTensor(),
                                                  transforms.Normalize(mean=cinic_mean,std=cinic_std)])), batch_size=test_bs, shuffle=False)
    return train_dl, test_dl
def get_dataloader_dist_skew(dataset, datadir, train_bs, test_bs, dataidxs=None, gray_scale_indices_train=None, gray_scale_indices_test=None):
    """Build train/test DataLoaders for a distribution-skew experiment in
    which some clients see gray-scale images and others see color images.

    NOTE(review): only the 'cifar10' branch is implemented; the 'mnist' and
    'cinic10' branches are `pass`, so reaching the final `return` on those
    paths raises NameError (train_dl/test_dl unbound). Confirm those paths
    are never taken by callers.
    """
    if dataset in ('mnist', 'cifar10'):
        if dataset == 'mnist':
            pass
        elif dataset == 'cifar10':
            # CIFAR-10 per-channel statistics, given on the 0-255 scale and
            # divided down to the 0-1 tensor range.
            normalize_colored = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                                     std=[x/255.0 for x in [63.0, 62.1, 66.7]])
            # Gray-scale variant: the same statistic replicated per channel.
            normalize_gray_scale = transforms.Normalize(mean=[x/255.0 for x in [125.3, 125.3, 125.3]],
                                                        std=[x/255.0 for x in [63.0, 63.0, 63.0]])
            # Augmentation: reflection-pad by 4, random 32x32 crop, random
            # horizontal flip, then normalize.
            transform_train_color = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(
                    Variable(x.unsqueeze(0), requires_grad=False),
                    (4, 4, 4, 4), mode='reflect').data.squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize_colored,
            ])
            transform_train_gray_scale = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(
                    Variable(x.unsqueeze(0), requires_grad=False),
                    (4, 4, 4, 4), mode='reflect').data.squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize_gray_scale,
            ])

            # data prep for test set (no augmentation, only normalization)
            transform_test_color = transforms.Compose([
                transforms.ToTensor(),
                normalize_colored])
            transform_test_gray_scale = transforms.Compose([
                transforms.ToTensor(),
                normalize_gray_scale])

            # NOTE: 'transofrm_gray_scale' (sic) matches the misspelled
            # parameter name declared by CIFAR10ColorGrayScaleTruncated.
            train_ds = CIFAR10ColorGrayScaleTruncated(datadir, dataidxs=dataidxs, gray_scale_indices=gray_scale_indices_train,
                                                      train=True, transform_color=transform_train_color,
                                                      transofrm_gray_scale=transform_train_gray_scale,
                                                      download=True)
            test_ds = CIFAR10ColorGrayScaleTruncated(datadir, gray_scale_indices=gray_scale_indices_test,
                                                     train=False, transform_color=transform_test_color,
                                                     transofrm_gray_scale=transform_test_gray_scale,
                                                     download=True)

            train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True)
            test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False)
    elif dataset == 'cinic10':
        pass
    return train_dl, test_dl
def pdm_prepare_full_weights_cnn(nets, device="cpu"):
    """Extract all FC and conv parameters of the given nets as numpy arrays.

    For each net, parameters are collected in state_dict order:
      * FC/classifier weights are transposed to (in_features, out_features);
        their biases are kept as-is.
      * 4-D conv/feature weights are flattened to
        (out_channels, in_channels * kh * kw); non-4-D conv weights are
        skipped; conv biases are kept as-is.
    Parameters whose names match neither group are ignored.

    Args:
        nets: iterable of torch.nn.Module instances.
        device: "cpu" if the nets already live on the host; any other value
            makes each tensor be copied to the host first.

    Returns:
        list (one entry per net) of lists of numpy arrays.
    """
    weights = []
    for net in nets:
        net_weights = []
        for k, v in net.state_dict().items():
            # Normalize to a host tensor once; the original duplicated the
            # whole extraction logic across cpu/gpu branches.
            t = v if device == "cpu" else v.cpu()
            if 'fc' in k or 'classifier' in k:
                # Transpose FC weights so rows index input features.
                net_weights.append(t.numpy().T if 'weight' in k else t.numpy())
            elif 'conv' in k or 'features' in k:
                if 'weight' in k:
                    shape = t.size()
                    if len(shape) == 4:
                        net_weights.append(t.numpy().reshape(
                            shape[0], shape[1] * shape[2] * shape[3]))
                    # Non-4-D conv weights intentionally skipped (original
                    # behavior preserved).
                else:
                    net_weights.append(t.numpy())
        weights.append(net_weights)
    return weights
def pdm_prepare_weights_vggs(nets, device="cpu"):
    """Extract only the classifier (FC) parameters of VGG-style nets.

    Conv layers are left untouched. For each net, classifier weights are
    transposed to (in_features, out_features) and biases kept as-is, in
    state_dict order; a zero vector sized to the first FC layer's input
    dimension is prepended (placeholder "bias" for the input layer, as
    expected by the matching code downstream).

    Args:
        nets: iterable of torch.nn.Module instances.
        device: "cpu" if the nets already live on the host; any other value
            makes each tensor be copied to the host first.

    Returns:
        list (one entry per net) of lists of numpy arrays.
    """
    weights = []
    for net in nets:
        net_weights = []
        # Cleaned up: the original kept dead locals (layer_i, enumerate
        # index), re-looked-up statedict[k] instead of using v, and
        # duplicated the device branch per parameter kind.
        for k, v in net.state_dict().items():
            if "classifier" in k and ("weight" in k or "bias" in k):
                arr = v.numpy() if device == "cpu" else v.cpu().numpy()
                net_weights.append(arr.T if "weight" in k else arr)
        # Placeholder bias row for the input layer (assumes at least one
        # classifier weight exists, as in the original).
        net_weights.insert(0, np.zeros(net_weights[0].shape[0], dtype=np.float32))
        weights.append(net_weights)
    return weights
def pdm_prepare_freq(cls_freqs, n_classes):
    """Densify per-network class-frequency mappings.

    Args:
        cls_freqs: mapping net_id -> {class_index: count}.
        n_classes: length of each output vector.

    Returns:
        list of numpy arrays (one per net, in sorted net-id order); entry c
        holds the count for class c, 0 for classes absent from the mapping.
    """
    dense_freqs = []
    for net_id in sorted(cls_freqs):
        counts = cls_freqs[net_id]
        dense_freqs.append(
            np.array([counts.get(c, 0) for c in range(n_classes)]))
    return dense_freqs
def compute_ensemble_accuracy(models: list, dataloader, n_classes, train_cls_counts=None, uniform_weights=False, sanity_weights=False, device="cpu"):
    """Evaluate an ensemble by weighted-averaging the models' predictions
    over a dataloader.

    Returns a tuple (accuracy, confusion_matrix). The weighting helpers
    (prepare_uniform_weights / prepare_sanity_weights / prepare_weight_matrix /
    normalize_weights / get_weighted_average_pred) are defined elsewhere in
    this module.
    """
    correct, total = 0, 0
    true_labels_list, pred_labels_list = np.array([]), np.array([])

    # Remember which models were in training mode so it can be restored
    # after evaluation.
    was_training = [False] * len(models)
    for i, model in enumerate(models):
        if model.training:
            was_training[i] = True
            model.eval()

    # Choose the per-class combination weights.
    if uniform_weights is True:
        weights_list = prepare_uniform_weights(n_classes, len(models))
    elif sanity_weights is True:
        weights_list = prepare_sanity_weights(n_classes, len(models))
    else:
        weights_list = prepare_weight_matrix(n_classes, train_cls_counts)

    weights_norm = normalize_weights(weights_list)

    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(dataloader):
            x, target = x.to(device), target.to(device)
            target = target.long()
            out = get_weighted_average_pred(models, weights_norm, x, device=device)

            _, pred_label = torch.max(out, 1)

            total += x.data.size()[0]
            correct += (pred_label == target.data).sum().item()

            # Accumulate labels on the host for the confusion matrix.
            if device == "cpu":
                pred_labels_list = np.append(pred_labels_list, pred_label.numpy())
                true_labels_list = np.append(true_labels_list, target.data.numpy())
            else:
                pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy())
                true_labels_list = np.append(true_labels_list, target.data.cpu().numpy())

    #logger.info(correct, total)

    conf_matrix = confusion_matrix(true_labels_list, pred_labels_list)

    # Restore training mode where it had been on.
    for i, model in enumerate(models):
        if was_training[i]:
            model.train()

    return correct / float(total), conf_matrix
class ModerateCNNContainerConvBlocks(nn.Module):
    """Convolutional feature extractor: three blocks of two 3x3 convs
    followed by 2x2 max-pooling (with a light Dropout2d after block 2).

    ``num_filters`` supplies the six output-channel counts, one per conv,
    in order. ``output_dim`` is accepted for signature compatibility but
    unused (no classifier head here).
    """

    def __init__(self, num_filters, output_dim=10):
        super(ModerateCNNContainerConvBlocks, self).__init__()
        stack = []
        # Conv Layer block 1
        stack += [
            nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_filters[0], num_filters[1], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        # Conv Layer block 2
        stack += [
            nn.Conv2d(num_filters[1], num_filters[2], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_filters[2], num_filters[3], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
        ]
        # Conv Layer block 3
        stack += [
            nn.Conv2d(num_filters[3], num_filters[4], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_filters[4], num_filters[5], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.conv_layer = nn.Sequential(*stack)

    def forward(self, x):
        """Apply the convolutional stack to *x* and return the feature map."""
        return self.conv_layer(x)
| 45.960094
| 186
| 0.596643
| 7,135
| 58,737
| 4.661247
| 0.066994
| 0.040862
| 0.047628
| 0.042336
| 0.811113
| 0.78772
| 0.759998
| 0.744873
| 0.727193
| 0.717
| 0
| 0.024083
| 0.289545
| 58,737
| 1,278
| 187
| 45.960094
| 0.772897
| 0.117626
| 0
| 0.718037
| 0
| 0
| 0.047683
| 0.005148
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034247
| false
| 0.037671
| 0.028539
| 0
| 0.098174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86778a4a64ad33dbb6efe9043e32a6e880a7e4ab
| 9,476
|
py
|
Python
|
pyathena/classic/plot_tools/plot_slices.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 1
|
2019-10-03T13:59:14.000Z
|
2019-10-03T13:59:14.000Z
|
pyathena/classic/plot_tools/plot_slices.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 3
|
2020-09-23T23:36:17.000Z
|
2022-01-11T06:16:56.000Z
|
pyathena/classic/plot_tools/plot_slices.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 2
|
2019-06-10T04:26:16.000Z
|
2019-12-04T22:27:02.000Z
|
import glob
import os
import string
import pickle
import numpy as np
import matplotlib.colorbar as colorbar
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm,SymLogNorm,NoNorm,Normalize
from .scatter_sp import scatter_sp
from ..vtk_reader import read_starvtk
from ..utils import texteffect
from ..set_units import set_units
unit=set_units(muH=1.4271)
to_Myr=unit['time'].to('Myr').value
def slice(slcfname, starfname, fields_to_draw, zoom=1., aux={},
          writefile=True, tstamp=True, stars=True, field_label=True, norm_factor=2):
    """Draw y/z slice maps for the given fields, with vertical colorbars.

    Args:
        slcfname: path to a pickled dict of slice data ('y'/'z' field maps
            plus 'yextent'/'zextent' and optionally 'time').
        starfname: star-particle VTK file, used for scatter overlays and as
            a time-stamp fallback.
        fields_to_draw: ordered field names to plot.
        zoom: vertical zoom factor for the y-slices.
        aux: field name -> dict of optional 'norm'/'cmap'/'clim'/'label'/
            'cticks'. Read-only here, so the mutable default is safe.
        writefile: save a PNG next to slcfname and close; otherwise return
            the figure.

    Note: this function shadows the builtin ``slice``; renaming would break
    callers, so the name is kept.
    """
    plt.rc('font', size=14)
    plt.rc('xtick', labelsize=14)
    plt.rc('ytick', labelsize=14)

    # Close the pickle handle deterministically (was left to the GC).
    with open(slcfname, 'rb') as fp:
        slc_data = pickle.load(fp)

    x0 = slc_data['yextent'][0]
    y0 = slc_data['yextent'][3]
    Lx = slc_data['yextent'][1] - slc_data['yextent'][0]
    Lz = slc_data['yextent'][3] - slc_data['yextent'][2]
    Lz = Lz / zoom
    ix = 2
    iz = ix * Lz / Lx

    nf = len(fields_to_draw)
    fig = plt.figure(1, figsize=(ix * nf, iz + ix * 2))
    gs = gridspec.GridSpec(2, nf, height_ratios=[iz, ix])
    gs.update(left=0.10, right=0.90, wspace=0, hspace=0)

    sp = read_starvtk(starfname)
    if 'time' in slc_data:
        tMyr = slc_data['time']
    else:
        # Bug fix: the unit-conversion factor defined at module level is
        # ``to_Myr``; the original referenced undefined ``Myr`` (NameError).
        time, sp = read_starvtk(starfname, time_out=True)
        tMyr = time * to_Myr

    images = []
    for i, axis in enumerate(['y', 'z']):
        for j, f in enumerate(fields_to_draw):
            data = slc_data[axis][f]
            ax = plt.subplot(gs[i, j])
            im = ax.imshow(data, origin='lower')
            if f in aux:  # bug fix: dict.has_key() was removed in Python 3
                if 'norm' in aux[f]:
                    im.set_norm(aux[f]['norm'])
                if 'cmap' in aux[f]:
                    im.set_cmap(aux[f]['cmap'])
                if 'clim' in aux[f]:
                    im.set_clim(aux[f]['clim'])
            extent = slc_data[axis + 'extent']
            im.set_extent(extent)
            images.append(im)
            if stars:
                # First column hides runaways; second shows all particles.
                if j == 0:
                    scatter_sp(sp, ax, axis=axis, runaway=False,
                               norm_factor=norm_factor)
                elif j == 1:
                    scatter_sp(sp, ax, axis=axis, norm_factor=norm_factor)
            ax.set_xlim(extent[0], extent[1])
            ax.set_ylim(extent[2], extent[3])

    # Vertical colorbar strip on the right (one slot per field, plus one
    # for the star-age colorbar when stars are drawn).
    gs2 = gridspec.GridSpec(nf + 2 + stars, 1)
    gs2.update(left=0.91, right=0.93, hspace=0.05)
    for j, (im, f) in enumerate(zip(images, fields_to_draw)):
        cax = plt.subplot(gs2[j + stars])
        cbar = fig.colorbar(im, cax=cax, orientation='vertical')
        if f in aux:
            if 'label' in aux[f]:
                cbar.set_label(aux[f]['label'])
            if 'cticks' in aux[f]:
                cbar.set_ticks(aux[f]['cticks'])

    if stars:
        cax = plt.subplot(gs2[0])
        cbar = colorbar.ColorbarBase(cax, ticks=[0, 20, 40],
                                     cmap=plt.cm.cool_r,
                                     norm=Normalize(vmin=0, vmax=40),
                                     orientation='vertical')
        cbar.set_label(r'${\rm age [Myr]}$')

    axes = fig.axes[:2 * nf]
    if field_label:
        for ax, f in zip(axes[:nf], fields_to_draw):
            if f in aux:
                if 'label' in aux[f]:
                    lab = aux[f]['label']
                    label = lab[:lab.rfind(r'\;')] + '$'
                    ax.text(0.5, 0.95, label, size=20,
                            horizontalalignment='center',
                            transform=ax.transAxes, **(texteffect()))

    if stars:
        # Off-canvas scatter points exist only to populate the mass legend.
        s1 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e3) / norm_factor,
                        color='k', alpha=.8, label=r'$10^3 M_\odot$')
        s2 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e4) / norm_factor,
                        color='k', alpha=.8, label=r'$10^4 M_\odot$')
        s3 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e5) / norm_factor,
                        color='k', alpha=.8, label=r'$10^5 M_\odot$')
        ax.set_xlim(x0, x0 + Lx)
        ax.set_ylim(y0, y0 + Lz)
        legend = ax.legend(
            (s1, s2, s3),
            (r'$10^3 M_\odot$', r'$10^4 M_\odot$', r'$10^5 M_\odot$'),
            scatterpoints=1, loc='lower left', fontsize='medium',
            frameon=True)

    plt.setp([ax.get_xticklabels() for ax in axes[:2 * nf]], visible=False)
    plt.setp([ax.get_yticklabels() for ax in axes[:2 * nf]], visible=False)
    plt.setp(axes[:nf], 'ylim',
             (slc_data['yextent'][2] / zoom, slc_data['yextent'][3] / zoom))
    plt.setp(axes[nf:2 * nf], 'xlabel', 'x [kpc]')
    plt.setp(axes[0], 'ylabel', 'z [kpc]')
    if tstamp:
        plt.setp(axes[0], 'title', 't=%3d Myr' % tMyr)
    plt.setp(axes[nf], 'ylabel', 'y [kpc]')
    plt.setp([ax.get_xticklabels() for ax in axes[nf:]], visible=True)
    plt.setp([ax.get_yticklabels() for ax in axes[:2 * nf:nf]], visible=True)
    plt.setp([ax.xaxis.get_majorticklabels() for ax in axes[nf:2 * nf]],
             rotation=45)

    pngfname = slcfname[:-1] + 'png'
    if writefile:
        # Bug fix: ``num`` is not a savefig() kwarg (it belongs to figure());
        # modern matplotlib rejects unknown savefig kwargs.
        plt.savefig(pngfname, bbox_inches='tight', dpi=150)
        plt.close()
    else:
        return fig
def slice2(slcfname, starfname, fields_to_draw, zoom=1., aux={},
           writefile=True, tstamp=True, stars=True, field_label=True, norm_factor=2):
    """Draw y/z slice maps with per-field horizontal colorbars on top.

    Unlike ``slice``, a field named 'star_particles' (expected first) is
    rendered as a star-particle scatter instead of an image. Parameters
    mirror ``slice``; ``aux`` is read-only here, so the mutable default
    is safe.
    """
    plt.rc('font', size=14)
    plt.rc('xtick', labelsize=14)
    plt.rc('ytick', labelsize=14)

    # Close the pickle handle deterministically (was left to the GC).
    with open(slcfname, 'rb') as fp:
        slc_data = pickle.load(fp)

    x0 = slc_data['yextent'][0]
    y0 = slc_data['yextent'][3]
    Lx = slc_data['yextent'][1] - slc_data['yextent'][0]
    Lz = slc_data['yextent'][3] - slc_data['yextent'][2]
    Lz = Lz / zoom
    ix = 2
    iz = ix * Lz / Lx

    nf = len(fields_to_draw)
    fig = plt.figure(1, figsize=(ix * nf, iz + ix * 1.2))
    gs = gridspec.GridSpec(2, nf, height_ratios=[iz, ix])
    gs.update(top=0.95, left=0.10, right=0.95, wspace=0.05, hspace=0)

    if stars:
        sp = read_starvtk(starfname)
    if 'time' in slc_data:
        tMyr = slc_data['time']
    else:
        # Bug fix: the unit-conversion factor defined at module level is
        # ``to_Myr``; the original referenced undefined ``Myr`` (NameError).
        time, sp = read_starvtk(starfname, time_out=True)
        tMyr = time * to_Myr

    images = []
    for i, axis in enumerate(['y', 'z']):
        for j, f in enumerate(fields_to_draw):
            ax = plt.subplot(gs[i, j])
            if f == 'star_particles':
                scatter_sp(sp, ax, axis=axis, norm_factor=norm_factor,
                           type='surf')
                if axis == 'y':
                    ax.set_xlim(x0, x0 + Lx)
                    ax.set_ylim(y0, y0 + Lz)
                if axis == 'z':
                    ax.set_xlim(x0, x0 + Lx)
                    ax.set_ylim(x0, x0 + Lx)
                ax.set_aspect(1.0)
            else:
                data = slc_data[axis][f]
                im = ax.imshow(data, origin='lower', interpolation='bilinear')
                if f in aux:  # bug fix: dict.has_key() removed in Python 3
                    if 'norm' in aux[f]:
                        im.set_norm(aux[f]['norm'])
                    if 'cmap' in aux[f]:
                        im.set_cmap(aux[f]['cmap'])
                    if 'clim' in aux[f]:
                        im.set_clim(aux[f]['clim'])
                extent = slc_data[axis + 'extent']
                im.set_extent(extent)
                images.append(im)
                ax.set_xlim(extent[0], extent[1])
                ax.set_ylim(extent[2], extent[3])

    # Horizontal colorbars above the top row; images align with
    # fields_to_draw[1:] because the star column produced no image.
    for j, (im, f) in enumerate(zip(images, fields_to_draw[1:])):
        ax = plt.subplot(gs[0, j + 1])
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("top", "3%", pad="1%")
        cbar = fig.colorbar(im, cax=cax, orientation='horizontal')
        if f in aux:
            if 'label' in aux[f]:
                cbar.set_label(aux[f]['label'])
            if 'cticks' in aux[f]:
                cbar.set_ticks(aux[f]['cticks'])
        cax.xaxis.tick_top()
        cax.xaxis.set_label_position('top')

    # Star-age colorbar and mass legend in the first column.
    ax = plt.subplot(gs[0, 0])
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("top", "3%", pad="1%")
    cbar = colorbar.ColorbarBase(cax, ticks=[0, 20, 40],
                                 cmap=plt.cm.cool_r,
                                 norm=Normalize(vmin=0, vmax=40),
                                 orientation='horizontal')
    cax.xaxis.tick_top()
    cax.xaxis.set_label_position('top')
    cbar.set_label(r'${\rm age [Myr]}$')

    # Off-canvas scatter points exist only to populate the mass legend.
    s1 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e3) / norm_factor,
                    color='k', alpha=.8, label=r'$10^3 M_\odot$')
    s2 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e4) / norm_factor,
                    color='k', alpha=.8, label=r'$10^4 M_\odot$')
    s3 = ax.scatter(Lx * 2, Lz * 2, s=np.sqrt(1.e5) / norm_factor,
                    color='k', alpha=.8, label=r'$10^5 M_\odot$')
    ax.set_xlim(x0, x0 + Lx)
    ax.set_ylim(y0, y0 + Lz)
    legend = ax.legend(
        (s1, s2, s3),
        (r'$10^3 M_\odot$', r'$10^4 M_\odot$', r'$10^5 M_\odot$'),
        scatterpoints=1, loc='lower left', fontsize='medium', frameon=True)

    axes = fig.axes
    plt.setp([ax.get_xticklabels() for ax in axes[:2 * nf]], visible=False)
    plt.setp([ax.get_yticklabels() for ax in axes[:2 * nf]], visible=False)
    plt.setp(axes[:nf], 'ylim',
             (slc_data['yextent'][2] / zoom, slc_data['yextent'][3] / zoom))
    plt.setp(axes[nf:2 * nf], 'xlabel', 'x [kpc]')
    plt.setp(axes[0], 'ylabel', 'z [kpc]')
    if tstamp:
        ax = axes[0]
        ax.text(0.5, 0.95, 't=%3d Myr' % tMyr, size=16,
                horizontalalignment='center',
                transform=ax.transAxes, **(texteffect()))
    plt.setp(axes[nf], 'ylabel', 'y [kpc]')
    plt.setp([ax.get_xticklabels() for ax in axes[nf:]], visible=True)
    plt.setp([ax.get_yticklabels() for ax in axes[:2 * nf:nf]], visible=True)
    plt.setp([ax.xaxis.get_majorticklabels() for ax in axes[nf:2 * nf]],
             rotation=45)

    # NOTE(review): ``slice`` builds this as slcfname[:-1] + 'png'; the
    # '+ "ng"' form only matches when slcfname ends in 'p' — verify which
    # is intended before changing.
    pngfname = slcfname + 'ng'
    if writefile:
        # Bug fix: ``num`` is not a savefig() kwarg; modern matplotlib
        # rejects unknown savefig kwargs.
        plt.savefig(pngfname, bbox_inches='tight', dpi=150)
        plt.close()
    else:
        return fig
| 38.520325
| 93
| 0.5897
| 1,499
| 9,476
| 3.616411
| 0.151434
| 0.033573
| 0.041321
| 0.020291
| 0.832134
| 0.813872
| 0.770522
| 0.750968
| 0.750968
| 0.746541
| 0
| 0.037498
| 0.228894
| 9,476
| 245
| 94
| 38.677551
| 0.704393
| 0.024694
| 0
| 0.724299
| 0
| 0
| 0.082061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009346
| false
| 0
| 0.065421
| 0
| 0.084112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86ecd81a5abe0273f3121e18a3e0acfab9f865c8
| 86
|
py
|
Python
|
utils/random_send.py
|
Lisovq/markov-chain-bot
|
b7fb8c1e47c0abda6ccc9dfed1d0b35b8a6eaff1
|
[
"MIT"
] | null | null | null |
utils/random_send.py
|
Lisovq/markov-chain-bot
|
b7fb8c1e47c0abda6ccc9dfed1d0b35b8a6eaff1
|
[
"MIT"
] | null | null | null |
utils/random_send.py
|
Lisovq/markov-chain-bot
|
b7fb8c1e47c0abda6ccc9dfed1d0b35b8a6eaff1
|
[
"MIT"
] | null | null | null |
from random import randint
def randomize() -> bool:
    """Return True with probability 1/34 (one uniform draw over 0..33)."""
    roll = randint(0, 33)
    return roll == 24
| 14.333333
| 31
| 0.662791
| 12
| 86
| 4.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0.22093
| 86
| 5
| 32
| 17.2
| 0.776119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d4abaee803c70af9a4655798a481ba367692f435
| 174,357
|
py
|
Python
|
core/domain/suggestion_services_test.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 2
|
2021-04-08T01:06:08.000Z
|
2021-06-02T08:20:13.000Z
|
core/domain/suggestion_services_test.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | null | null | null |
core/domain/suggestion_services_test.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 1
|
2020-12-11T06:56:31.000Z
|
2020-12-11T06:56:31.000Z
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion related services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import question_domain
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(suggestion_models, feedback_models, user_models) = (
models.Registry.import_models(
[models.NAMES.suggestion, models.NAMES.feedback, models.NAMES.user]
)
)
class SuggestionServicesUnitTests(test_utils.GenericTestBase):
    """Test the functions in suggestion_services."""

    # Score category of the form "<score type><delimiter><exp category>".
    score_category = (
        suggestion_models.SCORE_TYPE_CONTENT +
        suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra')

    # Target explorations shared by the tests below.
    target_id = 'exp1'
    target_id_2 = 'exp2'
    target_id_3 = 'exp3'
    target_version_at_submission = 1

    # Minimal "edit state content" change payload used when creating
    # suggestions.
    change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': {
            'content_id': 'content',
            'html': 'new suggestion content'
        }
    }

    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_EMAIL = 'reviewer@example.com'
    NORMAL_USER_EMAIL = 'normal@example.com'

    # Suggestion ids mirror the feedback-thread naming scheme
    # '<entity type>.<entity id>.<thread id>'.
    THREAD_ID = 'exploration.exp1.thread_1'

    COMMIT_MESSAGE = 'commit message'
    EMPTY_COMMIT_MESSAGE = ' '

    suggestion_id = THREAD_ID
    suggestion_id_2 = 'exploration.exp2.thread_2'
    suggestion_id_3 = 'exploration.exp3.thread_3'
    def setUp(self):
        """Sign up an author, reviewer and normal user, and create the
        exploration the suggestions will target.
        """
        super(SuggestionServicesUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
        self.signup(self.NORMAL_USER_EMAIL, 'normaluser')
        self.normal_user_id = self.get_user_id_from_email(
            self.NORMAL_USER_EMAIL)
        self.save_new_valid_exploration(
            self.target_id, self.author_id, category='Algebra')

    def assert_suggestion_status(self, suggestion_id, status):
        """Assert the status of the suggestion with suggestion_id."""
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        self.assertEqual(suggestion.status, status)
    def mock_accept_suggestion(
            self, suggestion_id, reviewer_id, commit_message, review_message):
        """Sets up the appropriate mocks to successfully call
        accept_suggestion.

        The nested swaps stub out exploration updates/fetches and the
        suggestion's pre-accept hooks so only the acceptance bookkeeping
        itself runs.
        """
        with self.swap(
            exp_services, 'update_exploration', self.mock_update_exploration):
            with self.swap(
                exp_fetchers, 'get_exploration_by_id',
                self.mock_get_exploration_by_id):
                with self.swap(
                    suggestion_registry.SuggestionEditStateContent,
                    'pre_accept_validate',
                    self.mock_pre_accept_validate_does_nothing):
                    with self.swap(
                        suggestion_registry.SuggestionEditStateContent,
                        'get_change_list_for_accepting_suggestion',
                        self.mock_get_change_list_does_nothing):
                        suggestion_services.accept_suggestion(
                            suggestion_id, reviewer_id,
                            commit_message, review_message)

    def mock_create_suggestion(self, target_id):
        """Sets up the appropriate mocks to successfully call
        create_suggestion.
        """
        with self.swap(
            feedback_models.GeneralFeedbackThreadModel,
            'generate_new_thread_id', self.mock_generate_new_thread_id):
            with self.swap(
                exp_fetchers, 'get_exploration_by_id',
                self.mock_get_exploration_by_id):
                suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    target_id, self.target_version_at_submission,
                    self.author_id, self.change, 'test description')

    def mock_generate_new_thread_id(self, entity_type, exp_id):
        """Deterministic thread id: reuse the exploration id's trailing
        character, e.g. 'exp1' -> 'exploration.exp1.thread_1'.
        """
        thread_id = 'thread_%s' % exp_id[-1]
        return '.'.join([entity_type, exp_id, thread_id])
    class MockExploration(python_utils.OBJECT):
        """Mocks an exploration. To be used only for testing."""

        def __init__(self, exploration_id, states):
            self.id = exploration_id
            self.states = states
            self.category = 'Algebra'

    # All mock explorations created for testing.
    explorations = [
        MockExploration('exp1', {'state_1': {}, 'state_2': {}}),
        MockExploration('exp2', {'state_1': {}, 'state_2': {}}),
        MockExploration('exp3', {'state_1': {}, 'state_2': {}})
    ]

    def mock_get_exploration_by_id(self, exp_id):
        """Return the matching mock exploration (None if no id matches)."""
        for exp in self.explorations:
            if exp.id == exp_id:
                return exp

    def mock_pre_accept_validate_does_nothing(self):
        """No-op stand-in for pre_accept_validate."""
        pass

    def mock_get_change_list_does_nothing(self):
        """No-op stand-in for get_change_list_for_accepting_suggestion."""
        pass

    def mock_accept_does_nothing(self, unused_arg):
        """No-op accept hook."""
        pass
    def test_create_new_suggestion_successfully(self):
        """A freshly created suggestion carries the expected fields and is
        in review.
        """
        expected_suggestion_dict = {
            'suggestion_id': 'exploration.exp1.thread_1',
            'suggestion_type': (
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
            'target_type': suggestion_models.TARGET_TYPE_EXPLORATION,
            'target_id': self.target_id,
            'target_version_at_submission': self.target_version_at_submission,
            'status': suggestion_models.STATUS_IN_REVIEW,
            'author_name': 'author',
            'change': {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'state_name': 'state_1',
                'new_value': {
                    'content_id': 'content',
                    'html': 'new suggestion content'
                },
                'old_value': None
            },
            'score_category': self.score_category,
            'language_code': None
        }
        self.mock_create_suggestion(self.target_id)
        observed_suggestion = suggestion_services.get_suggestion_by_id(
            self.suggestion_id)
        self.assertDictContainsSubset(
            expected_suggestion_dict, observed_suggestion.to_dict())

    def test_cannot_create_suggestion_with_invalid_suggestion_type(self):
        """An unknown suggestion type is rejected at creation."""
        with self.assertRaisesRegexp(Exception, 'Invalid suggestion type'):
            suggestion_services.create_suggestion(
                'invalid_suggestion_type',
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id, self.target_version_at_submission,
                self.author_id, self.change, 'test description')

    def test_cannot_create_suggestion_with_invalid_author_id(self):
        """A malformed author id is rejected at creation."""
        with self.assertRaisesRegexp(
            Exception, 'Expected author_id to be in a valid user ID format'):
            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id, self.target_version_at_submission,
                'invalid author ID', self.change, 'test description')
    def test_cannot_create_translation_suggestion_with_invalid_content_html_raise_error(self):  # pylint: disable=line-too-long
        """A translation suggestion whose content_html does not match the
        exploration's actual content is rejected.
        """
        add_translation_change_dict = {
            'cmd': 'add_translation',
            'state_name': 'Introduction',
            'content_id': 'content',
            'language_code': 'hi',
            'content_html': '<p>The invalid content html</p>',
            'translation_html': '<p>Translation for invalid content.</p>'
        }
        with self.assertRaisesRegexp(
            Exception,
            'The given content_html does not match the content of the '
            'exploration.'):
            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id, self.target_version_at_submission,
                self.author_id, add_translation_change_dict, 'test description')

    def test_get_all_stale_suggestion_ids(self):
        """A suggestion counts as stale iff it is older than the configured
        acceptance threshold (checked by swapping the threshold to 0 and to
        one week).
        """
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, self.change, 'test description')

        with self.swap(
            suggestion_models, 'THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS', 0):
            self.assertEqual(
                len(suggestion_services.get_all_stale_suggestion_ids()), 1)

        with self.swap(
            suggestion_models, 'THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS',
            7 * 24 * 60 * 60 * 1000):
            self.assertEqual(
                len(suggestion_services.get_all_stale_suggestion_ids()), 0)
    def mock_update_exploration(
            self, unused_user_id, unused_exploration_id, unused_change_list,
            commit_message, is_suggestion):
        """Stand-in for exp_services.update_exploration that also verifies
        the acceptance commit message format.
        """
        self.assertTrue(is_suggestion)
        self.assertEqual(
            commit_message, 'Accepted suggestion by %s: %s' % (
                'author', self.COMMIT_MESSAGE))

    def test_cannot_reject_suggestion_with_empty_review_message(self):
        """Rejecting with an empty review message fails and leaves the
        suggestion in review.
        """
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, self.change, 'test description')

        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)

        with self.assertRaisesRegexp(
            Exception, 'Review message cannot be empty.'):
            suggestion_services.reject_suggestion(
                suggestion.suggestion_id, self.reviewer_id, '')

        # Assert that the suggestion was not rejected.
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
    def test_accept_suggestion_and_send_email_to_author(self):
        """Accepting a suggestion with score recording enabled bumps the
        author's score; once the minimum review score is reached, the
        onboarding-reviewer email is sent.
        """
        change_list = [exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_ADD_STATE,
            'state_name': 'state 1',
        })]
        exp_services.update_exploration(
            self.author_id, self.target_id, change_list, 'Add state.')

        new_suggestion_content = state_domain.SubtitledHtml(
            'content', '<p>new suggestion content html</p>').to_dict()
        change_dict = {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'state 1',
            'new_value': new_suggestion_content
        }

        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, change_dict, 'test description')
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # Create a user proficiency model to verify that the
        # score and onboarding_email_sent fields have changed after the
        # suggestion has been accepted.
        user_models.UserContributionProficiencyModel.create(
            self.author_id, suggestion.score_category, 0)

        # An email is sent to users the first time that they pass the score
        # required to review a suggestion category. By default, when a
        # suggestion is accepted and the recording of scores is enabled, the
        # score of the author of that suggestion is increased by 1. Therefore,
        # by setting that increment to minimum score required to review, we can
        # ensure that the email is sent.
        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            with self.swap(
                feconf, 'SEND_SUGGESTION_REVIEW_RELATED_EMAILS', True):
                with self.swap(
                    suggestion_models, 'INCREMENT_SCORE_OF_AUTHOR_BY',
                    feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW):
                    suggestion_services.accept_suggestion(
                        suggestion.suggestion_id, self.reviewer_id,
                        self.COMMIT_MESSAGE, 'review message')

        # Assert that the suggestion is now accepted.
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_ACCEPTED)
        # Assert that the email was sent and that the score increased by the
        # correct amount.
        user_proficiency_model = (
            user_models.UserContributionProficiencyModel.get(
                self.author_id, suggestion.score_category
            )
        )
        self.assertTrue(user_proficiency_model.onboarding_email_sent)
        self.assertEqual(
            user_proficiency_model.score,
            feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
    def test_accept_suggestion_does_not_send_email_if_users_score_is_too_low(
            self):
        """Accepting a suggestion updates the author's score but sends no
        onboarding email while the score is below the review minimum.
        """
        self.mock_create_suggestion(self.target_id)
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # Create the user proficiency model to verify the score and
        # that the onboarding_email_sent field does not change after the
        # suggestion is accepted.
        user_models.UserContributionProficiencyModel.create(
            self.author_id, self.score_category, 0)

        # An email is sent to users the first time that they pass the score
        # required to review a suggestion category. By default, when a
        # suggestion is accepted and the recording of scores is enabled, the
        # score of the author of that suggestion is increased by 1. This is
        # less than the minimum score required to review so an email should not
        # be sent.
        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            with self.swap(
                feconf, 'SEND_SUGGESTION_REVIEW_RELATED_EMAILS', True):
                self.mock_accept_suggestion(
                    self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                    'review message')

        # Assert that the suggestion is now accepted.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
        user_proficiency_model = (
            user_models.UserContributionProficiencyModel.get(
                self.author_id, self.score_category
            )
        )
        # Assert that the users score was updated correctly.
        self.assertEqual(
            user_proficiency_model.score,
            suggestion_models.INCREMENT_SCORE_OF_AUTHOR_BY)
        # Assert that their score is not high enough to review the category.
        self.assertLess(
            user_proficiency_model.score,
            feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
        # Assert that the onboarding new reviewer email was not sent.
        self.assertFalse(user_proficiency_model.onboarding_email_sent)

    def test_accept_suggestion_creates_user_proficiency_model_if_it_is_none(
            self):
        """Accepting with score recording enabled lazily creates the
        author's proficiency model when none exists yet.
        """
        self.mock_create_suggestion(self.target_id)
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)

        # Verify that a user proficiency model does not exist.
        self.assertIsNone(user_models.UserContributionProficiencyModel.get(
            self.author_id, self.score_category))

        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            self.mock_accept_suggestion(
                self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                'review message')

        # Verify that a user proficiency model now exists.
        self.assertIsNotNone(user_models.UserContributionProficiencyModel.get(
            self.author_id, self.score_category))
def test_accept_suggestion_successfully(self):
self.mock_create_suggestion(self.target_id)
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
self.mock_accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
'review message')
# Assert that the suggestion is now accepted.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
suggestion = suggestion_services.get_suggestion_by_id(
self.suggestion_id)
self.assertEqual(
suggestion.final_reviewer_id, self.reviewer_id)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'review message')
def test_accept_suggestion_raises_exception_if_suggestion_does_not_exist(
self):
expected_exception_regexp = (
'You cannot accept the suggestion with id %s because it does not '
'exist.' % (self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
self.mock_accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
'review message')
    def test_accept_suggestion_with_invalid_math_fails(self):
        """Test that the method for accepting suggestions raises error when
        a suggestion with invalid math-tags is tried to be accepted.
        """
        change_dict = {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'state_1',
            'new_value': {
                'content_id': 'content',
                # NOTE(review): this math tag appears to be invalid because it
                # uses the legacy raw_latex-with-value attribute format —
                # confirm against the math-tag validation rules that the
                # accept flow applies.
                'html': (
                    '<oppia-noninteractive-math raw_latex-with-value="&am'
                    'p;quot;(x - a_1)(x - a_2)(x - a_3)...(x - a_n)&q'
                    'uot;"></oppia-noninteractive-math>')
            }
        }
        # Fix the generated thread id so the suggestion id is predictable,
        # and serve a mock exploration instead of hitting the datastore.
        with self.swap(
            feedback_models.GeneralFeedbackThreadModel,
            'generate_new_thread_id', self.mock_generate_new_thread_id):
            with self.swap(
                exp_fetchers, 'get_exploration_by_id',
                self.mock_get_exploration_by_id):
                suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    self.target_id, self.target_version_at_submission,
                    self.author_id, change_dict, 'test description')
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)

        expected_exception_regexp = (
            'Invalid math tags found in the suggestion with id %s.' % (
                self.suggestion_id)
        )
        with self.assertRaisesRegexp(Exception, expected_exception_regexp):
            self.mock_accept_suggestion(
                self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                'review message')

        # Assert that the status of the suggestion hasn't changed.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
def test_accept_suggestion_raises_exception_if_suggestion_already_accepted(
self):
self.mock_create_suggestion(self.target_id)
# Accept the suggestion.
self.mock_accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
# Assert that the suggestion has been accepted.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
expected_exception_regexp = (
'The suggestion with id %s has already been accepted/rejected.' % (
self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
def test_accept_suggestion_raises_exception_if_suggestion_already_rejected(
self):
self.mock_create_suggestion(self.target_id)
# Reject the suggestion.
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'reject review message'
)
# Assert that the suggestion has been rejected.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_REJECTED)
expected_exception_regexp = (
'The suggestion with id %s has already been accepted/rejected.' % (
self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
# Assert that the suggestion is still rejected.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_REJECTED)
    def test_accept_suggestion_invalid_suggestion_failure(self):
        """Updating a suggestion whose score_category is malformed should
        fail validation.
        """
        self.mock_create_suggestion(self.target_id)
        suggestion = suggestion_services.get_suggestion_by_id(
            self.suggestion_id)

        # Invalidating the suggestion: a valid score_category has the form
        # score_type.score_sub_type, which this value does not.
        suggestion.score_category = 'invalid_score_category'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected score_category to be of the form '
            'score_type.score_sub_type, received '
            'invalid_score_category'):
            suggestion_services._update_suggestion(suggestion)  # pylint: disable=protected-access
def test_accept_suggestion_no_commit_message_failure(self):
self.mock_create_suggestion(self.target_id)
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
with self.assertRaisesRegexp(
Exception, 'Commit message cannot be empty.'):
suggestion_services.accept_suggestion(
self.suggestion_id, self.reviewer_id,
self.EMPTY_COMMIT_MESSAGE, None)
# Assert that the status of the suggestion didn't change.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
def test_reject_suggestion_successfully(self):
self.mock_create_suggestion(self.target_id)
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'reject review message')
# Assert that the suggestion has been rejected.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_REJECTED)
suggestion = suggestion_services.get_suggestion_by_id(
self.suggestion_id)
self.assertEqual(
suggestion.final_reviewer_id, self.reviewer_id)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(last_message.text, 'reject review message')
def test_reject_suggestions_successfully(self):
# Create the first suggestion to be rejected.
self.mock_create_suggestion(self.target_id_2)
self.assert_suggestion_status(
self.suggestion_id_2, suggestion_models.STATUS_IN_REVIEW)
# Create another suggestion to be rejected.
self.mock_create_suggestion(self.target_id_3)
self.assert_suggestion_status(
self.suggestion_id_3, suggestion_models.STATUS_IN_REVIEW)
suggestion_ids = [self.suggestion_id_2, self.suggestion_id_3]
suggestion_services.reject_suggestions(
suggestion_ids, self.reviewer_id, 'reject review message')
for suggestion_id in suggestion_ids:
# Assert that the statuses changed to rejected.
self.assert_suggestion_status(
suggestion_id, suggestion_models.STATUS_REJECTED)
# Assert that the final reviewer id was updated.
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_id)
self.assertEqual(
suggestion.final_reviewer_id, self.reviewer_id)
# Assert that the messages were updated.
thread_messages = feedback_services.get_messages(suggestion_id)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'reject review message')
def test_reject_suggestion_raises_exception_if_suggestion_does_not_exist(
self):
expected_exception_regexp = (
'You cannot reject the suggestion with id %s because it does not '
'exist.' % (self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'review message')
def test_reject_suggestion_raises_exception_if_suggestion_already_accepted(
self):
self.mock_create_suggestion(self.target_id)
# Accept the suggestion.
self.mock_accept_suggestion(
self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
# Assert that the suggestion has been accepted.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
# Rejecting the suggestion should not work because the suggestion has
# already been accepted.
expected_exception_regexp = (
'The suggestion with id %s has already been accepted/rejected.' % (
self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'reject review message')
# Assert that the suggestion's status did not change.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
def test_reject_suggestion_raises_exception_if_suggestion_already_rejected(
self):
self.mock_create_suggestion(self.target_id)
# Reject the suggestion.
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'reject review message')
# Assert that the suggestion has been rejected.
self.assert_suggestion_status(
self.suggestion_id, suggestion_models.STATUS_REJECTED)
# Rejecting the suggestion should not work because the suggestion has
# already been rejected.
expected_exception_regexp = (
'The suggestion with id %s has already been accepted/rejected.' % (
self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.reject_suggestion(
self.suggestion_id, self.reviewer_id, 'reject review message')
    def test_resubmit_rejected_suggestion_success(self):
        """A rejected suggestion can be resubmitted with an updated change,
        which puts it back in review.
        """
        self.mock_create_suggestion(self.target_id)
        # Reject the suggestion.
        suggestion_services.reject_suggestion(
            self.suggestion_id, self.reviewer_id, 'reject review message')
        # Assert that the suggestion has been rejected.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_REJECTED)

        # Create the new change for the resubmitted suggestion.
        resubmit_change_content = state_domain.SubtitledHtml(
            'content', '<p>resubmit change content html</p>').to_dict()
        resubmit_change = exp_domain.ExplorationChange(
            {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'state_name': 'state_1',
                'new_value': resubmit_change_content,
                # The previously-suggested content becomes the old value.
                'old_value': self.change['new_value']
            }
        )

        # Resubmit rejected suggestion.
        suggestion_services.resubmit_rejected_suggestion(
            self.suggestion_id, 'resubmit summary message', self.author_id,
            resubmit_change)

        # The suggestion's status should now be in review instead of rejected.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # The suggestion's change should be updated.
        suggestion = suggestion_services.get_suggestion_by_id(
            self.suggestion_id)
        self.assertEqual(
            suggestion.change.new_value['html'],
            resubmit_change_content['html'])
def test_resubmit_rejected_suggestion_raises_exception_for_empty_message(
self):
self.mock_create_suggestion(self.target_id)
# Can't resubmit a rejected suggestion if the summary message is empty.
with self.assertRaisesRegexp(
Exception, 'Summary message cannot be empty.'):
suggestion_services.resubmit_rejected_suggestion(
self.suggestion_id, '', self.author_id, {})
def test_resubmit_rejected_suggestion_raises_exception_for_unhandled_input(
self):
self.mock_create_suggestion(self.target_id)
# Can't resubmit a rejected suggestion if the suggestion hasn't been
# rejected yet.
expected_exception_regexp = (
'The suggestion with id %s is not yet handled.' % (
self.suggestion_id)
)
with self.assertRaisesRegexp(Exception, expected_exception_regexp):
suggestion_services.resubmit_rejected_suggestion(
self.suggestion_id, 'resubmit summary message',
self.author_id, {}
)
    def test_resubmit_rejected_suggestion_raises_excep_for_accepted_suggestion(
            self):
        """Only rejected suggestions may be resubmitted; accepted suggestions
        cannot be.
        """
        self.mock_create_suggestion(self.target_id)
        # Accept the suggestion.
        self.mock_accept_suggestion(
            self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
            'review message')
        # Verify that the suggestion has been accepted.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_ACCEPTED)

        # Can't resubmit the suggestion if it's already accepted.
        expected_exception_regexp = (
            'The suggestion with id %s was accepted. Only rejected '
            'suggestions can be resubmitted.' % (
                self.suggestion_id)
        )
        with self.assertRaisesRegexp(
            Exception, expected_exception_regexp):
            suggestion_services.resubmit_rejected_suggestion(
                self.suggestion_id, 'resubmit summary message',
                self.author_id, {}
            )

        # Verify that the suggestion is still accepted.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
def test_check_can_resubmit_suggestion(self):
self.mock_create_suggestion(self.target_id)
can_resubmit = suggestion_services.check_can_resubmit_suggestion(
self.suggestion_id, self.author_id)
self.assertEqual(can_resubmit, True)
can_resubmit = suggestion_services.check_can_resubmit_suggestion(
self.suggestion_id, self.normal_user_id)
self.assertEqual(can_resubmit, False)
class SuggestionGetServicesUnitTests(test_utils.GenericTestBase):
    """Tests for the suggestion query/getter services
    (query_suggestions and the get_*_suggestions_* helpers).
    """

    # Score category for translation suggestions in English.
    score_category = (
        suggestion_models.SCORE_TYPE_TRANSLATION +
        suggestion_models.SCORE_CATEGORY_DELIMITER + 'English')

    # Ids of the explorations that suggestions in these tests target.
    target_id_1 = 'exp1'
    target_id_2 = 'exp2'
    target_id_3 = 'exp3'
    target_version_at_submission = 1
    # Default edit-state-content change used by the suggestions in setUp.
    change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': 'new suggestion content'
    }

    AUTHOR_EMAIL_1 = 'author1@example.com'
    REVIEWER_EMAIL_1 = 'reviewer1@example.com'

    AUTHOR_EMAIL_2 = 'author2@example.com'
    REVIEWER_EMAIL_2 = 'reviewer2@example.com'

    # Change dict for creating a Hindi translation suggestion.
    add_translation_change_dict = {
        'cmd': exp_domain.CMD_ADD_TRANSLATION,
        'state_name': 'state_1',
        'content_id': 'content',
        'language_code': 'hi',
        'content_html': '<p>State name: state_1, Content id: content</p>',
        'translation_html': '<p>This is translated html.</p>'
    }

    class MockExploration(python_utils.OBJECT):
        """Mocks an exploration. To be used only for testing."""

        def __init__(self, exploration_id, states):
            self.id = exploration_id
            self.states = states
            self.category = 'Algebra'

        def get_content_html(self, state_name, content_id):
            """Used to mock the get_content_html method for explorations."""
            # state_name and content_id are used here to suppress the unused
            # arguments warning. The main goal of this method is to just
            # produce content html for the tests.
            return '<p>State name: %s, Content id: %s</p>' % (
                state_name, content_id
            )

    # All mock explorations created for testing.
    explorations = [
        MockExploration('exp1', {'state_1': {}, 'state_2': {}}),
        MockExploration('exp2', {'state_1': {}, 'state_2': {}}),
        MockExploration('exp3', {'state_1': {}, 'state_2': {}}),
    ]

    def mock_get_exploration_by_id(self, exp_id):
        """Returns the matching mock exploration, or None if no id
        matches.
        """
        for exp in self.explorations:
            if exp.id == exp_id:
                return exp

    def _create_question_suggestion_with_skill_id(self, skill_id):
        """Creates a question suggestion with the given skill_id."""
        suggestion_change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': 'en',
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': skill_id,
            'skill_difficulty': 0.3
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL, skill_id, 1,
            self.author_id_1, suggestion_change, 'test description'
        )

    def _create_translation_suggestion_with_language_code(self, language_code):
        """Creates a translation suggestion with the language code given."""
        add_translation_change_dict = {
            'cmd': exp_domain.CMD_ADD_TRANSLATION,
            'state_name': 'state_1',
            'content_id': 'content',
            'language_code': language_code,
            'content_html': '<p>State name: state_1, Content id: content</p>',
            'translation_html': '<p>This is translated html.</p>'
        }

        # Serve mock explorations so no real exploration needs to exist.
        with self.swap(
            exp_fetchers, 'get_exploration_by_id',
            self.mock_get_exploration_by_id):
            with self.swap(
                exp_domain.Exploration, 'get_content_html',
                self.MockExploration.get_content_html):
                translation_suggestion = suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    self.target_id_1, 1, self.author_id_1,
                    add_translation_change_dict, 'test description')

        return translation_suggestion

    def setUp(self):
        super(SuggestionGetServicesUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL_1, 'author1')
        self.author_id_1 = self.get_user_id_from_email(self.AUTHOR_EMAIL_1)
        self.signup(self.REVIEWER_EMAIL_1, 'reviewer1')
        self.reviewer_id_1 = self.get_user_id_from_email(self.REVIEWER_EMAIL_1)

        self.signup(self.AUTHOR_EMAIL_2, 'author2')
        self.author_id_2 = self.get_user_id_from_email(self.AUTHOR_EMAIL_2)
        self.signup(self.REVIEWER_EMAIL_2, 'reviewer2')
        self.reviewer_id_2 = self.get_user_id_from_email(self.REVIEWER_EMAIL_2)

        # Create five edit-state-content suggestions: three by author_1 on
        # target_id_1, one by author_2 on target_id_1, and one by author_2
        # on target_id_2. The per-author/per-target counts asserted in the
        # query tests below derive from this setup.
        with self.swap(
            exp_fetchers, 'get_exploration_by_id',
            self.mock_get_exploration_by_id):
            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id_1, self.target_version_at_submission,
                self.author_id_1, self.change, 'test description')

            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id_1, self.target_version_at_submission,
                self.author_id_1, self.change, 'test description')

            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id_1, self.target_version_at_submission,
                self.author_id_1, self.change, 'test description')

            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id_1, self.target_version_at_submission,
                self.author_id_2, self.change, 'test description')

            suggestion_services.create_suggestion(
                suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
                suggestion_models.TARGET_TYPE_EXPLORATION,
                self.target_id_2, self.target_version_at_submission,
                self.author_id_2, self.change, 'test description')

    def test_get_by_author(self):
        """Queries by author return the counts created in setUp."""
        queries = [('author_id', self.author_id_1)]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 3)

        queries = [('author_id', self.author_id_2)]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 2)

    def test_get_by_target_id(self):
        """Queries by target type and id return the counts from setUp."""
        queries = [
            ('target_type', suggestion_models.TARGET_TYPE_EXPLORATION),
            ('target_id', self.target_id_1)
        ]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 4)

        queries = [
            ('target_type', suggestion_models.TARGET_TYPE_EXPLORATION),
            ('target_id', self.target_id_2)
        ]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 1)

    def test_get_by_status(self):
        """All five setUp suggestions start in review."""
        queries = [('status', suggestion_models.STATUS_IN_REVIEW)]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 5)

    def test_get_by_type(self):
        """All five setUp suggestions are edit-state-content suggestions."""
        queries = [(
            'suggestion_type',
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT)]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 5)

    def test_query_suggestions(self):
        """Multi-field queries AND their filters; unknown fields raise."""
        queries = [
            ('target_type', suggestion_models.TARGET_TYPE_EXPLORATION),
            ('target_id', self.target_id_1),
            ('author_id', self.author_id_2)
        ]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 1)

        queries = [
            ('target_type', suggestion_models.TARGET_TYPE_EXPLORATION),
            ('target_id', self.target_id_1),
            ('author_id', self.author_id_1),
            ('status', suggestion_models.STATUS_IN_REVIEW)
        ]
        self.assertEqual(len(suggestion_services.query_suggestions(queries)), 3)

        queries = [
            ('target_type', suggestion_models.TARGET_TYPE_EXPLORATION),
            ('target_id', self.target_id_1),
            ('invalid_field', 'value')
        ]
        with self.assertRaisesRegexp(
            Exception, 'Not allowed to query on field invalid_field'):
            suggestion_services.query_suggestions(queries)

    def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp(self):
        # Create the translation suggestion associated with exploration id
        # target_id_1.
        with self.swap(
            exp_fetchers, 'get_exploration_by_id',
            self.mock_get_exploration_by_id):
            with self.swap(
                exp_domain.Exploration, 'get_content_html',
                self.MockExploration.get_content_html):
                suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    self.target_id_1, 1, self.author_id_1,
                    self.add_translation_change_dict, 'test description')

        # Assert that there is one translation suggestion with the given
        # exploration id found.
        self.assertEqual(
            len(
                suggestion_services
                .get_translation_suggestion_ids_with_exp_ids(
                    [self.target_id_1])), 1)

    def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps(
            self):
        # Create the translation suggestion associated with exploration id
        # target_id_2.
        with self.swap(
            exp_fetchers, 'get_exploration_by_id',
            self.mock_get_exploration_by_id):
            with self.swap(
                exp_domain.Exploration, 'get_content_html',
                self.MockExploration.get_content_html):
                suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    self.target_id_2, 1, self.author_id_1,
                    self.add_translation_change_dict, 'test description')
        # Create the translation suggestion associated with exploration id
        # target_id_3.
        with self.swap(
            exp_fetchers, 'get_exploration_by_id',
            self.mock_get_exploration_by_id):
            with self.swap(
                exp_domain.Exploration, 'get_content_html',
                self.MockExploration.get_content_html):
                suggestion_services.create_suggestion(
                    suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                    suggestion_models.TARGET_TYPE_EXPLORATION,
                    self.target_id_3, 1, self.author_id_1,
                    self.add_translation_change_dict, 'test description')

        # Assert that there are two translation suggestions with the given
        # exploration ids found.
        self.assertEqual(
            len(
                suggestion_services
                .get_translation_suggestion_ids_with_exp_ids(
                    [self.target_id_2, self.target_id_3])), 2)

    def test_get_translation_suggestion_ids_with_exp_ids_with_invalid_exp(
            self):
        # Assert that there are no translation suggestions with an invalid
        # exploration id found.
        self.assertEqual(
            len(
                suggestion_services
                .get_translation_suggestion_ids_with_exp_ids(
                    ['invalid_exp_id'])), 0)

    def test_get_translation_suggestion_ids_with_exp_ids_with_empty_exp_list(
            self):
        # Assert that there are no translation suggestions found when we
        # use an empty exp_ids list.
        self.assertEqual(
            len(
                suggestion_services
                .get_translation_suggestion_ids_with_exp_ids([])), 0)

    def test_get_translation_suggestions_waiting_longest_for_review_per_lang(
            self):
        suggestion_1 = self._create_translation_suggestion_with_language_code(
            'hi')
        suggestion_2 = self._create_translation_suggestion_with_language_code(
            'hi')
        suggestion_3 = self._create_translation_suggestion_with_language_code(
            'hi')

        suggestions = (
            suggestion_services
            .get_translation_suggestions_waiting_longest_for_review(
                'hi'))

        # Assert that the suggestions are in the order that they were created.
        self.assertEqual(len(suggestions), 3)
        self.assertEqual(
            suggestions[0].suggestion_id, suggestion_1.suggestion_id)
        self.assertEqual(
            suggestions[1].suggestion_id, suggestion_2.suggestion_id)
        self.assertEqual(
            suggestions[2].suggestion_id, suggestion_3.suggestion_id)
        # Earlier-created suggestions have strictly earlier timestamps.
        for i in python_utils.RANGE(len(suggestions) - 1):
            self.assertLess(
                suggestions[i].last_updated, suggestions[i + 1].last_updated)

    def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang(
            self):
        """No suggestions are returned for a language with none waiting."""
        suggestions = (
            suggestion_services
            .get_translation_suggestions_waiting_longest_for_review(
                'wrong_language_code'))

        self.assertEqual(len(suggestions), 0)

    def test_get_question_suggestions_waiting_longest_for_review_keeps_order(
            self):
        """This test makes sure that if a suggestion is rejected and is then
        resubmitted, we count the time that the suggestion has been waiting
        for review from when it was resubmitted, not from when it was first
        submitted.
        """
        suggestion_1 = self._create_question_suggestion_with_skill_id('skill1')
        suggestion_2 = self._create_question_suggestion_with_skill_id('skill2')

        # Verify that both suggestions are returned and in the right order.
        suggestions = (
            suggestion_services
            .get_question_suggestions_waiting_longest_for_review()
        )
        self.assertEqual(len(suggestions), 2)
        self.assertEqual(
            suggestions[0].suggestion_id, suggestion_1.suggestion_id)
        self.assertEqual(
            suggestions[1].suggestion_id, suggestion_2.suggestion_id)
        self.assertLess(
            suggestions[0].last_updated, suggestions[1].last_updated)

        # Reject the suggestion that was created first since it is the one
        # that has been waiting the longest for review.
        suggestion_services.reject_suggestion(
            suggestion_1.suggestion_id, self.reviewer_id_1, 'Reject message')

        # Verify that only the suggestion that was created second is
        # returned.
        suggestions = (
            suggestion_services
            .get_question_suggestions_waiting_longest_for_review()
        )
        self.assertEqual(len(suggestions), 1)
        self.assertEqual(
            suggestions[0].suggestion_id, suggestion_2.suggestion_id)

        # Change the question_dict of the question suggestion that got
        # rejected so we can resubmit the suggestion for review.
        resubmit_question_change = suggestion_1.change
        resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1']

        # Resubmit the rejected question suggestion.
        suggestion_services.resubmit_rejected_suggestion(
            suggestion_1.suggestion_id, 'resubmit summary message',
            self.author_id_1, resubmit_question_change
        )

        # Verify that both suggestions are returned again and the suggestion
        # that was created second is now the first suggestion in the returned
        # list, since it has been waiting longer (due to it not being
        # updated).
        suggestions = (
            suggestion_services
            .get_question_suggestions_waiting_longest_for_review()
        )
        self.assertEqual(len(suggestions), 2)
        self.assertEqual(
            suggestions[0].suggestion_id, suggestion_2.suggestion_id)
        self.assertEqual(
            suggestions[1].suggestion_id, suggestion_1.suggestion_id)
        self.assertLess(
            suggestions[0].last_updated, suggestions[1].last_updated)

    def test_get_question_suggestions_waiting_longest_for_review(self):
        suggestion_1 = self._create_question_suggestion_with_skill_id('skill1')
        suggestion_2 = self._create_question_suggestion_with_skill_id('skill2')
        suggestion_3 = self._create_question_suggestion_with_skill_id('skill3')

        suggestions = (
            suggestion_services
            .get_question_suggestions_waiting_longest_for_review()
        )

        # Assert that the suggestions are in the order that they were created.
        self.assertEqual(len(suggestions), 3)
        self.assertEqual(
            suggestions[0].suggestion_id, suggestion_1.suggestion_id)
        self.assertEqual(
            suggestions[1].suggestion_id, suggestion_2.suggestion_id)
        self.assertEqual(
            suggestions[2].suggestion_id, suggestion_3.suggestion_id)
        for i in python_utils.RANGE(len(suggestions) - 1):
            self.assertLess(
                suggestions[i].last_updated, suggestions[i + 1].last_updated)

    def test_query_suggestions_that_can_be_reviewed_by_user(self):
        # User proficiency models for user1.
        # NOTE(review): user1's scores of 15 for category1/category2
        # presumably exceed the minimum review score threshold while the
        # scores of 5 do not — confirm against
        # feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW.
        user_models.UserContributionProficiencyModel.create(
            'user1', 'category1', 15)
        user_models.UserContributionProficiencyModel.create(
            'user1', 'category2', 15)
        user_models.UserContributionProficiencyModel.create(
            'user1', 'category3', 5)
        # User proficiency models for user2.
        user_models.UserContributionProficiencyModel.create(
            'user2', 'category1', 5)
        user_models.UserContributionProficiencyModel.create(
            'user2', 'category2', 5)
        user_models.UserContributionProficiencyModel.create(
            'user2', 'category3', 5)

        suggestion_models.GeneralSuggestionModel.create(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            'exp1', 1, suggestion_models.STATUS_IN_REVIEW, 'author_3',
            'reviewer_2', self.change, 'category1',
            'exploration.exp1.thread_1', None)
        suggestion_models.GeneralSuggestionModel.create(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
            suggestion_models.STATUS_IN_REVIEW, 'author_3',
            'reviewer_2', self.change, 'category2',
            'exploration.exp1.thread_2', None)
        suggestion_models.GeneralSuggestionModel.create(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
            suggestion_models.STATUS_IN_REVIEW, 'author_3',
            'reviewer_2', self.change, 'category3',
            'exploration.exp1.thread_3', None)
        # This suggestion does not count as a suggestion that can be reviewed
        # by a user because it has already been rejected.
        suggestion_models.GeneralSuggestionModel.create(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
            suggestion_models.STATUS_REJECTED, 'author_3',
            'reviewer_2', self.change, 'category1',
            'exploration.exp1.thread_4', None)
        suggestion_models.GeneralSuggestionModel.create(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
            suggestion_models.STATUS_IN_REVIEW, 'author_3',
            'reviewer_2', self.change, 'category2',
            'exploration.exp1.thread_5', None)

        self.assertEqual(len(
            suggestion_services
            .get_all_suggestions_that_can_be_reviewed_by_user('user1')), 3)
        self.assertEqual(len(
            suggestion_services
            .get_all_suggestions_that_can_be_reviewed_by_user('user2')), 0)
class SuggestionIntegrationTests(test_utils.GenericTestBase):
    """Integration tests that exercise suggestion creation and acceptance
    against real explorations, topics and stories.
    """

    # Id of the exploration that suggestions in these tests target.
    EXP_ID = 'exp1'
    # Topic and story used when testing translation suggestions.
    TOPIC_ID = 'topic1'
    STORY_ID = 'story1'
    TRANSLATION_LANGUAGE_CODE = 'en'

    AUTHOR_EMAIL = 'author@example.com'

    # Score category for content suggestions in the Algebra category.
    score_category = (
        suggestion_models.SCORE_TYPE_CONTENT +
        suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra')

    # Fixed thread id returned by mock_generate_new_thread_id, which also
    # serves as the suggestion id in these tests.
    THREAD_ID = 'exploration.exp1.thread_1'

    COMMIT_MESSAGE = 'commit message'
    def mock_generate_new_thread_id(self, unused_entity_type, unused_entity_id):
        """Returns a fixed thread id so tests can predict the suggestion
        id.
        """
        return self.THREAD_ID
    def setUp(self):
        super(SuggestionIntegrationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        # The editor doubles as the reviewer in these tests.
        self.reviewer_id = self.editor_id

        self.editor = user_services.UserActionsInfo(self.editor_id)

        # Login and create exploration and suggestions.
        self.login(self.EDITOR_EMAIL)

        # Create exploration.
        exploration = (
            self.save_new_linear_exp_with_state_names_and_interactions(
                self.EXP_ID, self.editor_id, ['State 1', 'State 2'],
                ['TextInput'], category='Algebra'))

        self.old_content = state_domain.SubtitledHtml(
            'content', '<p>old content</p>').to_dict()
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    self.TRANSLATION_LANGUAGE_CODE: {
                        'filename': 'filename3.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False,
                        'duration_secs': 42.43
                    }
                },
                'default_outcome': {},
                'ca_placeholder_0': {}
            }
        }
        self.old_recorded_voiceovers = (
            state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))
        # Create content in State A with a single audio subtitle.
        exploration.states['State 1'].update_content(
            state_domain.SubtitledHtml.from_dict(self.old_content))
        exploration.states['State 1'].update_recorded_voiceovers(
            self.old_recorded_voiceovers)
        exp_services._save_exploration(self.editor_id, exploration, '', [])  # pylint: disable=protected-access

        rights_manager.publish_exploration(self.editor, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.editor, self.EXP_ID, self.owner_id,
            rights_domain.ROLE_EDITOR)

        # Change dict that replaces the old content of State 1 with new
        # content; used as the default suggestion change in these tests.
        self.new_content = state_domain.SubtitledHtml(
            'content', '<p>new content</p>').to_dict()

        self.change = {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'State 1',
            'new_value': self.new_content
        }

        self.target_version_at_submission = exploration.version

        # Set up for testing translation suggestions. Translation suggestions
        # correspond to a given topic, story and exploration.

        self.save_new_topic(self.TOPIC_ID, self.owner_id)

        self.save_new_story(
            self.STORY_ID, self.owner_id, self.TOPIC_ID, title='A story',
            description='Description', notes='Notes')

        # Adds the story to the topic.
        topic_services.add_canonical_story(
            self.owner_id, self.TOPIC_ID, self.STORY_ID)

        # Adds the exploration to the story.
        story_change_list_to_add_an_exp = [
            story_domain.StoryChange({
                'cmd': 'add_story_node',
                'node_id': 'node_1',
                'title': 'Node1',
            }), story_domain.StoryChange({
                'cmd': 'update_story_node_property',
                'property_name': 'exploration_id',
                'node_id': 'node_1',
                'old_value': None,
                'new_value': self.EXP_ID
            })
        ]
        story_services.update_story(
            self.owner_id, self.STORY_ID,
            story_change_list_to_add_an_exp, 'Added exploration.')
def create_translation_suggestion_associated_with_exp(
self, exp_id, author_id):
"""Creates a translation suggestion that is associated with an
exploration with id exp_id. The author of the created suggestion is
author_id.
"""
# Gets the html content in the exploration to be translated.
exploration = exp_fetchers.get_exploration_by_id(exp_id)
content_html = exploration.states['State 1'].content.html
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': content_html,
'translation_html': '<p>This is translated html.</p>'
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
exp_id, 1, author_id, add_translation_change_dict,
'test description')
def assert_created_suggestion_is_valid(self, target_id, author_id):
"""Assert that the created suggestion is in review and that only one
suggestion with the given target_id and author_id exists.
"""
suggestions = suggestion_services.query_suggestions(
[('author_id', author_id), ('target_id', target_id)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_IN_REVIEW)
def test_create_and_accept_suggestion(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.accept_suggestion(
suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>new content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED)
def test_create_and_reject_suggestion(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.reject_suggestion(
suggestion_id, self.reviewer_id, 'Reject message')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'Reject message')
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>old content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_REJECTED)
def test_create_and_accept_suggestion_with_message(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.accept_suggestion(
suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
'Accept message')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'Accept message')
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>new content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED)
def test_delete_skill_rejects_question_suggestion(self):
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion_change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
suggestion_models.TARGET_TYPE_SKILL, skill_id, 1,
self.author_id, suggestion_change, 'test description')
self.assert_created_suggestion_is_valid(skill_id, self.author_id)
skill_services.delete_skill(self.author_id, skill_id)
# Suggestion should be rejected after corresponding skill is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', skill_id)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_delete_topic_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
topic_services.delete_topic(self.author_id, self.TOPIC_ID)
# Suggestion should be rejected after the topic is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_delete_story_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
story_services.delete_story(self.author_id, self.STORY_ID)
# Suggestion should be rejected after the story is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_remove_exp_from_story_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
# Removes the exploration from the story.
story_services.update_story(
self.owner_id, self.STORY_ID, [story_domain.StoryChange({
'cmd': 'update_story_node_property',
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': self.EXP_ID,
'new_value': None
})], 'Removed exploration.')
# Suggestion should be rejected after exploration is removed from the
# story.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
class UserContributionProficiencyUnitTests(test_utils.GenericTestBase):
    """Tests for querying user contribution proficiency scores and the
    review permissions that are derived from those scores.
    """

    def setUp(self):
        super(UserContributionProficiencyUnitTests, self).setUp()
        self.signup('user1@example.com', 'user1')
        self.signup('user2@example.com', 'user2')
        self.user_1_id = self.get_user_id_from_email('user1@example.com')
        self.user_2_id = self.get_user_id_from_email('user2@example.com')

    def test_get_all_user_ids_who_are_allowed_to_review(self):
        # Only user 1's score in category2 meets the minimum score required
        # to review; every other (user, category) pair has a score of 0.
        user_models.UserContributionProficiencyModel.create(
            self.user_1_id, 'category1', 0)
        user_models.UserContributionProficiencyModel.create(
            self.user_1_id, 'category2',
            feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
        user_models.UserContributionProficiencyModel.create(
            self.user_2_id, 'category1', 0)
        user_models.UserContributionProficiencyModel.create(
            self.user_2_id, 'category2', 0)

        user_ids = (
            suggestion_services.get_all_user_ids_who_are_allowed_to_review(
                'category1'))
        self.assertEqual(user_ids, [])
        user_ids = (
            suggestion_services.get_all_user_ids_who_are_allowed_to_review(
                'category2'))
        self.assertEqual(user_ids, [self.user_1_id])

        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_1_id, 'category1'))
        self.assertTrue(suggestion_services.can_user_review_category(
            self.user_1_id, 'category2'))
        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_2_id, 'category1'))
        # Bug fix: this assertion previously duplicated the category1 check
        # directly above. User 2 should also be unable to review category2,
        # where their score is 0.
        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_2_id, 'category2'))

    def test_get_all_scores_of_the_user_with_multiple_scores(self):
        user_models.UserContributionProficiencyModel.create(
            self.user_1_id, 'category1', 1)
        user_models.UserContributionProficiencyModel.create(
            self.user_1_id, 'category2', 2)
        user_models.UserContributionProficiencyModel.create(
            self.user_1_id, 'category3', 3)

        # The expected result maps each category to the score created above.
        expected_scores_dict = {}
        for index in python_utils.RANGE(1, 4):
            key = 'category%s' % python_utils.UNICODE(index)
            expected_scores_dict[key] = index

        scores_dict = suggestion_services.get_all_scores_of_user(
            self.user_1_id)

        self.assertEqual(len(scores_dict), 3)
        self.assertDictEqual(scores_dict, expected_scores_dict)

    def test_get_all_scores_of_the_user_when_no_scores_exist(self):
        scores_dict = suggestion_services.get_all_scores_of_user(
            self.user_1_id)

        self.assertEqual(len(scores_dict), 0)
        self.assertDictEqual(scores_dict, {})
class VoiceoverApplicationServiceUnitTest(test_utils.GenericTestBase):
    """Tests for the ExplorationVoiceoverApplication class."""

    def setUp(self):
        super(VoiceoverApplicationServiceUnitTest, self).setUp()
        self.signup('author@example.com', 'author')
        self.author_id = self.get_user_id_from_email('author@example.com')
        # Store a voiceover application model directly so that the service
        # layer can be exercised against it.
        application_model = (
            suggestion_models.GeneralVoiceoverApplicationModel(
                id='application_id',
                target_type='exploration',
                target_id='0',
                status='review',
                author_id=self.author_id,
                final_reviewer_id=None,
                language_code='en',
                filename='filename.mp3',
                content='<p>content</p>',
                rejection_message=None))
        application_model.put()
        self.voiceover_application_model = (
            suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
                'application_id'))

    def test_get_voiceover_application_from_model_with_invalid_type_raise_error(
            self):
        # With a valid target type the lookup succeeds without raising.
        suggestion_services.get_voiceover_application(
            self.voiceover_application_model.id)

        # With an invalid target type the lookup should raise.
        self.voiceover_application_model.target_type = 'invalid_type'
        with self.assertRaisesRegexp(
            Exception,
            'Invalid target type for voiceover application: invalid_type'):
            suggestion_services.get_voiceover_application(
                self.voiceover_application_model.id)
class ReviewableSuggestionEmailInfoUnitTests(
        test_utils.GenericTestBase):
    """Tests the methods related to the ReviewableSuggestionEmailInfo class.

    The tests below verify that the email info built from a suggestion
    carries the suggestion's type, language code, emphasized text content
    (with RTE components rendered as bracketed placeholders) and submission
    datetime.
    """
    # target_id names the exploration created in setUp; it is the target of
    # the translation and "edit state content" suggestions created below.
    target_id = 'exp1'
    skill_id = 'skill1'
    language_code = 'en'
    AUTHOR_EMAIL = 'author1@example.com'
    REVIEWER_EMAIL = 'reviewer@community.org'
    COMMIT_MESSAGE = 'commit message'
    def _create_translation_suggestion_with_translation_html(
            self, translation_html):
        """Creates a translation suggestion with the given translation_html."""
        add_translation_change_dict = {
            'cmd': exp_domain.CMD_ADD_TRANSLATION,
            'state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
            'language_code': self.language_code,
            'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
            'translation_html': translation_html
        }
        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_translation_change_dict,
            'test description'
        )
    def _create_question_suggestion_with_question_html_content(
            self, question_html_content):
        """Creates a question suggestion with the html content used for the
        question in the question suggestion.
        """
        # Build the question state data while DEFAULT_INIT_STATE_CONTENT_STR
        # is swapped to the given html, so that the created question carries
        # question_html_content as its content.
        with self.swap(
            feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html_content):
            add_question_change_dict = {
                'cmd': (
                    question_domain
                    .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
                'question_dict': {
                    'question_state_data': self._create_valid_question_data(
                        'default_state').to_dict(),
                    'language_code': self.language_code,
                    'question_state_data_schema_version': (
                        feconf.CURRENT_STATE_SCHEMA_VERSION),
                    'linked_skill_ids': ['skill_1'],
                    'inapplicable_skill_misconception_ids': ['skillid12345-1']
                },
                'skill_id': self.skill_id,
                'skill_difficulty': 0.3
            }
        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_question_change_dict,
            'test description'
        )
    def _create_edit_state_content_suggestion(self):
        """Creates an "edit state content" suggestion."""
        edit_state_content_change_dict = {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'Introduction',
            'new_value': {
                'content_id': 'content',
                'html': 'new html content'
            },
            'old_value': {
                'content_id': 'content',
                'html': 'old html content'
            }
        }
        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, edit_state_content_change_dict,
            'test description')
    def _assert_reviewable_suggestion_email_infos_are_equal(
            self, reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info):
        """Asserts that the reviewable suggestion email info is equal to the
        expected reviewable suggestion email info.
        """
        # Compare field by field so a failure names the mismatching field.
        self.assertEqual(
            reviewable_suggestion_email_info.suggestion_type,
            expected_reviewable_suggestion_email_info.suggestion_type)
        self.assertEqual(
            reviewable_suggestion_email_info.language_code,
            expected_reviewable_suggestion_email_info.language_code)
        self.assertEqual(
            reviewable_suggestion_email_info.suggestion_content,
            expected_reviewable_suggestion_email_info.suggestion_content)
        self.assertEqual(
            reviewable_suggestion_email_info.submission_datetime,
            expected_reviewable_suggestion_email_info.submission_datetime)
    def setUp(self):
        super(
            ReviewableSuggestionEmailInfoUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(
            self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(
            self.REVIEWER_EMAIL)
        self.save_new_valid_exploration(self.target_id, self.author_id)
    def test_create_raises_for_suggestion_type_not_on_contributor_dashboard(
            self):
        edit_state_content_suggestion = (
            self._create_edit_state_content_suggestion())
        # Mocking the SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS dict in
        # suggestion services so that this test still passes if the
        # "edit state content" suggestion type is added to the Contributor
        # Dashboard in the future.
        suggestion_emphasized_text_getter_functions_mock = {}
        with self.swap(
            suggestion_services, 'SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS',
            suggestion_emphasized_text_getter_functions_mock):
            with self.assertRaisesRegexp(
                Exception,
                'Expected suggestion type to be offered on the Contributor '
                'Dashboard, received: %s.' % (
                    suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT)):
                (
                    suggestion_services
                    .create_reviewable_suggestion_email_info_from_suggestion(
                        edit_state_content_suggestion)
                )
    def test_contributor_suggestion_types_are_in_suggestion_text_getter_dict(
            self):
        # This test will fail if a new suggestion type is added to the
        # Contributor Dashboard but hasn't been added to
        # SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS.
        sorted_text_getter_dict_suggestion_types = sorted(
            suggestion_services
            .SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS.keys())
        sorted_contributor_dashboard_suggestion_types = sorted(
            suggestion_models.CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES)
        self.assertListEqual(
            sorted_text_getter_dict_suggestion_types,
            sorted_contributor_dashboard_suggestion_types)
    def test_create_from_suggestion_returns_info_for_question_suggestion(self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p>default question content</p>'))
        # The suggestion content is expected to be the plain text of the
        # question html, without the surrounding tags.
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'default question content',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_for_translation_suggestion(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>default translation content</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'default translation content',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_for_empty_html(self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                ''))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code, '',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_with_no_trailing_whitespace(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                ' <p> test whitespace </p> '))
        # Leading/trailing whitespace in the html is expected to be stripped
        # from the suggestion content.
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'test whitespace',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_math_rte(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>translation with rte'
                '<oppia-noninteractive-math></oppia-noninteractive-math></p>'))
        # RTE components are expected to be rendered as bracketed
        # placeholders in the suggestion content, e.g. [Math].
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Math]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_image_rte(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>translation with rte'
                '<oppia-noninteractive-image></oppia-noninteractive-image>'
                '</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Image]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_link_rte(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_rte_repeats(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-link></oppia-noninteractive-link>'))
        # Each occurrence of an RTE component yields its own placeholder.
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link] [Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_multi_rte(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-math></oppia-noninteractive-math>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link] [Math]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_rte_value(
            self):
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p><oppia-noninteractive-link text-with-value="&quot;Test '
                'a tag&quot;" url-with-value="&quot;somelink&'
                'quot;"></oppia-noninteractive-link></p>'))
        # Attribute values on the RTE component do not appear in the
        # suggestion content; only the placeholder does.
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                '[Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_math_rte(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-math></oppia-noninteractive-math> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Math]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_image_rte(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-image></oppia-noninteractive-image>'
                '</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Image]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info)
    def test_create_returns_info_for_question_suggestion_if_html_has_link_rte(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_repeat_rte(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-link></oppia-noninteractive-link>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link] [Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_multi_rte(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-math></oppia-noninteractive-math>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link] [Math]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_rte_value(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&quot;Test '
                'a tag&quot;" url-with-value="&quot;somelink&'
                'quot;"></oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_text(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&quot;Test '
                'a tag&quot;" url-with-value="&quot;somelink&'
                'quot;">text</oppia-noninteractive-link></p>'))
        # Text nested inside an RTE component is replaced by the
        # placeholder as well.
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_html(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&quot;Test '
                'a tag&quot;" url-with-value="&quot;somelink&'
                'quot;"><p>text</p></oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_multi_word(
            self):
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link-test text-with-value='
                '"&quot;Test a tag&quot;" url-with-value="&quot;'
                'somelink&quot;"><p>text</p>'
                '</oppia-noninteractive-link-test></p>'))
        # Multi-word RTE component names become capitalized, space-separated
        # placeholder words, e.g. link-test -> [Link Test].
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link Test]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
class GetSuggestionsWaitingForReviewInfoToNotifyReviewersUnitTests(
test_utils.GenericTestBase):
"""Test the ability of the
    get_suggestions_waiting_for_review_info_to_notify_reviewers method
in suggestion services, which is used to retrieve the information required
to notify reviewers that there are suggestions that need review.
"""
target_id = 'exp1'
language_code = 'en'
AUTHOR_EMAIL = 'author1@example.com'
REVIEWER_1_EMAIL = 'reviewer1@community.org'
REVIEWER_2_EMAIL = 'reviewer2@community.org'
COMMIT_MESSAGE = 'commit message'
def _create_translation_suggestion_with_language_code_and_author(
self, language_code, author_id):
"""Creates a translation suggestion in the given language_code with the
given author id.
"""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': '<p>This is the translated content.</p>'
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
author_id, add_translation_change_dict,
'test description'
)
    def _create_question_suggestion_with_skill_id_and_author_id(
            self, skill_id, author_id):
        """Creates a question suggestion targeting the given skill_id,
        submitted by the given author_id.
        """
        add_question_change_dict = {
            'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': self.language_code,
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                # NOTE(review): linked_skill_ids is hard-coded to 'skill_1'
                # regardless of the skill_id parameter (only the suggestion
                # target below uses skill_id) — confirm this is intentional.
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': skill_id,
            'skill_difficulty': 0.3
        }
        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            author_id, add_question_change_dict,
            'test description'
        )
def _create_reviewable_suggestion_email_infos_from_suggestions(
self, suggestions):
"""Creates a list of ReviewableSuggestionEmailInfo objects from
the given suggestions.
"""
return [
(
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
suggestion)
) for suggestion in suggestions
]
def _assert_reviewable_suggestion_email_infos_are_in_correct_order(
self, reviewable_suggestion_email_infos,
expected_reviewable_suggestion_email_infos):
"""Asserts that the reviewable suggestion email infos are equal to the
expected reviewable suggestion email infos and that the reviewable
suggestion email infos are sorted in descending order according to
review wait time.
"""
self.assertEqual(
len(reviewable_suggestion_email_infos),
len(expected_reviewable_suggestion_email_infos)
)
for index, reviewable_suggestion_email_info in enumerate(
reviewable_suggestion_email_infos):
self.assertEqual(
reviewable_suggestion_email_info.suggestion_type,
expected_reviewable_suggestion_email_infos[
index].suggestion_type)
self.assertEqual(
reviewable_suggestion_email_info.language_code,
expected_reviewable_suggestion_email_infos[
index].language_code)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_content,
expected_reviewable_suggestion_email_infos[
index].suggestion_content)
self.assertEqual(
reviewable_suggestion_email_info.submission_datetime,
expected_reviewable_suggestion_email_infos[
index].submission_datetime)
for index in python_utils.RANGE(
len(reviewable_suggestion_email_infos) - 1):
self.assertLess(
reviewable_suggestion_email_infos[index].submission_datetime,
reviewable_suggestion_email_infos[
index + 1].submission_datetime
)
    def setUp(self):
        """Registers one suggestion author, two reviewers, and the target
        exploration that the translation suggestions are made against.
        """
        super(
            GetSuggestionsWaitingForReviewInfoToNotifyReviewersUnitTests,
            self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_1_EMAIL, 'reviewer1')
        self.reviewer_1_id = self.get_user_id_from_email(
            self.REVIEWER_1_EMAIL)
        self.signup(self.REVIEWER_2_EMAIL, 'reviewer2')
        self.reviewer_2_id = self.get_user_id_from_email(
            self.REVIEWER_2_EMAIL)
        self.save_new_valid_exploration(self.target_id, self.author_id)
    def test_get_returns_empty_for_reviewers_who_authored_the_suggestions(self):
        """A reviewer is not notified about suggestions they authored
        themselves: the result has one (empty) list for the one reviewer.
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        self._create_question_suggestion_with_skill_id_and_author_id(
            'skill_1', self.reviewer_1_id)
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.reviewer_1_id)
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_empty_for_question_reviewers_if_only_translation_exist(
            self):
        """A question-only reviewer gets no infos when only translation
        suggestions are waiting for review.
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.author_id)
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_empty_for_translation_reviewers_if_only_question_exist(
            self):
        """A translation-only reviewer gets no infos when only question
        suggestions are waiting for review.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        self._create_question_suggestion_with_skill_id_and_author_id(
            'skill_1', self.reviewer_1_id)
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_empty_for_accepted_suggestions(self):
        """Suggestions that were already accepted are no longer waiting for
        review, so no infos are returned for them.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        translation_suggestion = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_services.accept_suggestion(
            translation_suggestion.suggestion_id, self.reviewer_1_id,
            self.COMMIT_MESSAGE, 'review message')
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_empty_for_rejected_suggestions(self):
        """Suggestions that were rejected are no longer waiting for review,
        so no infos are returned for them.
        """
        # NOTE(review): this grants question-review rights but creates and
        # rejects a translation suggestion; the result is empty either way,
        # but confirm whether translation rights were intended here.
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        translation_suggestion = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_services.reject_suggestion(
            translation_suggestion.suggestion_id, self.reviewer_1_id,
            'review message')
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_suggestion_infos_for_a_translation_reviewer_same_lang(
            self):
        """A translation reviewer is notified about all waiting suggestions
        in the language they can review, in submission order.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        translation_suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion_1, translation_suggestion_2]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_empty_for_a_translation_reviewer_with_diff_lang_rights(
            self):
        """A translation reviewer gets no infos for suggestions in a language
        they do not have review rights for.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.author_id)
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self.assertEqual(reviewable_suggestion_email_infos, [[]])
    def test_get_returns_suggestion_infos_for_translation_reviewer_multi_lang(
            self):
        """A reviewer with rights in multiple languages is notified about
        waiting suggestions across all of those languages.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        translation_suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'en', self.author_id))
        translation_suggestion_3 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [
                    translation_suggestion_1, translation_suggestion_2,
                    translation_suggestion_3]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_infos_for_translation_reviewer_past_limit_same_lang(
            self):
        """Only the longest-waiting suggestions up to the per-reviewer email
        limit are returned (limit swapped to 1 here).
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        # Create another translation suggestion so that we pass the
        # MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER limit.
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.author_id)
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion_1]))
        with self.swap(
            suggestion_services,
            'MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER', 1):
            reviewable_suggestion_email_infos = (
                suggestion_services
                .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                    [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_infos_for_translation_reviewer_past_limit_diff_lang(
            self):
        """The email limit applies across languages: with a limit of 2, the
        two longest-waiting suggestions are returned even though they come
        from different language codes.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        translation_suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'en', self.author_id))
        # Create another hindi and english translation suggestion so that we
        # reach the MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER limit for each
        # language code but continue to update which suggestions have been
        # waiting the longest (since the top two suggestions waiting the
        # longest are from different language codes).
        self._create_translation_suggestion_with_language_code_and_author(
            'en', self.author_id)
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.author_id)
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion_1, translation_suggestion_2]))
        with self.swap(
            suggestion_services,
            'MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER', 2):
            reviewable_suggestion_email_infos = (
                suggestion_services
                .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                    [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_suggestion_infos_for_multiple_translation_reviewers(
            self):
        """Each reviewer in the input list gets their own list of infos,
        filtered to the languages that reviewer can review.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_2_id, 'hi')
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        translation_suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'en', self.author_id))
        translation_suggestion_3 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        expected_reviewable_suggestion_email_infos_reviewer_1 = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [
                    translation_suggestion_1, translation_suggestion_2,
                    translation_suggestion_3]))
        expected_reviewable_suggestion_email_infos_reviewer_2 = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion_1, translation_suggestion_3]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id, self.reviewer_2_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 2)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos_reviewer_1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[1],
            expected_reviewable_suggestion_email_infos_reviewer_2)
    def test_get_returns_suggestion_infos_for_reviewer_with_multi_review_rights(
            self):
        """A reviewer with both question and translation rights is notified
        about waiting suggestions of both types, in submission order.
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        suggestion_1 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_1', self.author_id))
        suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_3 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_2', self.author_id))
        suggestion_4 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_5 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'en', self.author_id))
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [
                    suggestion_1, suggestion_2, suggestion_3, suggestion_4,
                    suggestion_5]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_suggestion_infos_for_a_question_reviewer(self):
        """A question reviewer is notified about all waiting question
        suggestions, in submission order.
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        question_suggestion_1 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_1', self.author_id))
        question_suggestion_2 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_2', self.author_id))
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [question_suggestion_1, question_suggestion_2]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_suggestion_infos_for_multi_question_reviewers(self):
        """Two question reviewers both receive the same set of waiting
        question suggestion infos.
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        user_services.allow_user_to_review_question(self.reviewer_2_id)
        question_suggestion_1 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_1', self.author_id))
        question_suggestion_2 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_2', self.author_id))
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [question_suggestion_1, question_suggestion_2]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id, self.reviewer_2_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 2)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[1],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_suggestion_infos_for_question_reviewer_past_limit(
            self):
        """Question suggestion infos are capped at the per-reviewer email
        limit (limit swapped to 1 here).
        """
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        question_suggestion_1 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_1', self.author_id))
        self._create_question_suggestion_with_skill_id_and_author_id(
            'skill_2', self.author_id)
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [question_suggestion_1]))
        with self.swap(
            suggestion_services,
            'MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER', 1):
            reviewable_suggestion_email_infos = (
                suggestion_services
                .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                    [self.reviewer_1_id]
                )
            )
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
    def test_get_returns_suggestion_infos_for_multi_reviewers_with_multi_rights(
            self):
        """Each reviewer receives only the infos matching their own mix of
        question and per-language translation review rights.
        """
        # Reviewer 1's permissions.
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        # Reviewer 2's permissions.
        user_services.allow_user_to_review_question(self.reviewer_2_id)
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_2_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_2_id, 'fr')
        suggestion_1 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_1', self.author_id))
        suggestion_2 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_3 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'fr', self.author_id))
        suggestion_4 = (
            self._create_question_suggestion_with_skill_id_and_author_id(
                'skill_2', self.author_id))
        suggestion_5 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        suggestion_6 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'en', self.author_id))
        expected_reviewable_suggestion_email_infos_reviewer_1 = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [
                    suggestion_1, suggestion_2, suggestion_4, suggestion_5,
                    suggestion_6]))
        expected_reviewable_suggestion_email_infos_reviewer_2 = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [
                    suggestion_1, suggestion_2, suggestion_3, suggestion_4,
                    suggestion_5]))
        reviewable_suggestion_email_infos = (
            suggestion_services
            .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                [self.reviewer_1_id, self.reviewer_2_id]
            )
        )
        self.assertEqual(len(reviewable_suggestion_email_infos), 2)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos_reviewer_1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[1],
            expected_reviewable_suggestion_email_infos_reviewer_2)
    def test_get_returns_infos_for_reviewer_with_multi_rights_past_limit(
            self):
        """The email limit applies across suggestion types: with a limit of
        1, only the single longest-waiting suggestion is returned.
        """
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code_and_author(
                'hi', self.author_id))
        # Create additional suggestions so that we pass the
        # MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER limit regardless of
        # suggestion type.
        self._create_question_suggestion_with_skill_id_and_author_id(
            'skill_1', self.author_id)
        self._create_translation_suggestion_with_language_code_and_author(
            'hi', self.author_id)
        self._create_question_suggestion_with_skill_id_and_author_id(
            'skill_1', self.author_id)
        expected_reviewable_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion_1]))
        with self.swap(
            suggestion_services,
            'MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER', 1):
            reviewable_suggestion_email_infos = (
                suggestion_services
                .get_suggestions_waiting_for_review_info_to_notify_reviewers(
                    [self.reviewer_1_id]))
        self.assertEqual(len(reviewable_suggestion_email_infos), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            reviewable_suggestion_email_infos[0],
            expected_reviewable_suggestion_email_infos)
class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
"""Test the functionality related to updating the community contribution
stats.
TODO(#10957): It is currently not possible to resubmit a rejected
translation suggestion for review. As a result, there isn't a test for
that case in this test class. If the functionality is added, a new test
should be added here to cover that case. If the functionality is not going
to be added then this can be removed. See issue #10957 for more context.
"""
target_id = 'exp1'
skill_id = 'skill_123456'
language_code = 'en'
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@community.org'
COMMIT_MESSAGE = 'commit message'
def _create_translation_suggestion_with_language_code(self, language_code):
"""Creates a translation suggestion in the given language_code."""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': '<p>This is the translated content.</p>'
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_translation_change_dict,
'test description'
)
    def _create_question_suggestion(self):
        """Creates a question suggestion targeting self.skill_id, authored
        by self.author_id.
        """
        add_question_change_dict = {
            'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': self.language_code,
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': self.skill_id,
            'skill_difficulty': 0.3
        }
        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_question_change_dict,
            'test description'
        )
def _create_edit_state_content_suggestion(self):
"""Creates an "edit state content" suggestion."""
edit_state_content_change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': 'new html content'
},
'old_value': {
'content_id': 'content',
'html': 'old html content'
}
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, edit_state_content_change_dict,
'test description'
)
def _assert_community_contribution_stats_is_in_default_state(self):
"""Checks if the community contribution stats is in its default
state.
"""
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
), {})
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {})
self.assertEqual(
community_contribution_stats.question_reviewer_count, 0)
self.assertEqual(
community_contribution_stats.question_suggestion_count, 0)
    def setUp(self):
        """Registers an author and a reviewer, plus the exploration and skill
        that the suggestions in these tests target.
        """
        super(
            CommunityContributionStatsUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(
            self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(
            self.REVIEWER_EMAIL)
        self.save_new_valid_exploration(self.target_id, self.author_id)
        self.save_new_skill(self.skill_id, self.author_id)
    def test_create_edit_state_content_suggestion_does_not_change_the_counts(
            self):
        """Edit-state-content suggestions are not tracked in the stats."""
        self._create_edit_state_content_suggestion()
        self._assert_community_contribution_stats_is_in_default_state()
    def test_accept_edit_state_content_suggestion_does_not_change_the_counts(
            self):
        """Accepting an edit-state-content suggestion leaves stats alone."""
        edit_state_content_suggestion = (
            self._create_edit_state_content_suggestion())
        self._assert_community_contribution_stats_is_in_default_state()
        suggestion_services.accept_suggestion(
            edit_state_content_suggestion.suggestion_id, self.reviewer_id,
            self.COMMIT_MESSAGE, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_edit_state_content_suggestion_does_not_change_the_counts(
            self):
        """Rejecting an edit-state-content suggestion leaves stats alone."""
        edit_state_content_suggestion = (
            self._create_edit_state_content_suggestion())
        self._assert_community_contribution_stats_is_in_default_state()
        suggestion_services.reject_suggestion(
            edit_state_content_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_edit_state_content_suggestions_does_not_change_the_counts(
            self):
        """Bulk-rejecting edit-state-content suggestions leaves stats
        alone.
        """
        edit_state_content_suggestion_1 = (
            self._create_edit_state_content_suggestion())
        edit_state_content_suggestion_2 = (
            self._create_edit_state_content_suggestion())
        self._assert_community_contribution_stats_is_in_default_state()
        suggestion_services.reject_suggestions(
            [
                edit_state_content_suggestion_1.suggestion_id,
                edit_state_content_suggestion_2.suggestion_id
            ], self.reviewer_id, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_resubmit_edit_state_content_suggestion_does_not_change_the_counts(
            self):
        """Resubmitting a rejected edit-state-content suggestion leaves
        stats alone.
        """
        edit_state_content_suggestion = (
            self._create_edit_state_content_suggestion())
        suggestion_services.reject_suggestion(
            edit_state_content_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        self._assert_community_contribution_stats_is_in_default_state()
        # Change the new_value of the html of the suggestion that got rejected
        # so we can resubmit the suggestion for review.
        resubmit_suggestion_change = edit_state_content_suggestion.change
        resubmit_suggestion_change.new_value['html'] = 'new html to resubmit'
        # Resubmit the rejected "edit state content" suggestion.
        suggestion_services.resubmit_rejected_suggestion(
            edit_state_content_suggestion.suggestion_id,
            'resubmit summary message', self.author_id,
            resubmit_suggestion_change)
        self._assert_community_contribution_stats_is_in_default_state()
    def test_create_question_suggestion_increases_question_suggestion_count(
            self):
        """Creating a question suggestion bumps the question suggestion
        count to 1.
        """
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_create_multi_question_suggestions_increases_question_count(self):
        """Each created question suggestion increments the count."""
        self._create_question_suggestion()
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 2)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_accept_question_suggestion_decreases_question_suggestion_count(
            self):
        """Accepting a question suggestion restores the default counts."""
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.accept_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            self.COMMIT_MESSAGE, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_question_suggestion_decreases_question_suggestion_count(
            self):
        """Rejecting a question suggestion restores the default counts."""
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_question_suggestions_decreases_question_suggestion_count(
            self):
        """Bulk-rejecting question suggestions restores the default
        counts.
        """
        question_suggestion_1 = self._create_question_suggestion()
        question_suggestion_2 = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 2)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestions(
            [
                question_suggestion_1.suggestion_id,
                question_suggestion_2.suggestion_id
            ], self.reviewer_id, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_resubmit_question_suggestion_increases_question_suggestion_count(
            self):
        """Resubmitting a rejected question suggestion re-increments the
        question suggestion count.
        """
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        # Assert that the question suggestion decreased because the suggestion
        # was rejected.
        self._assert_community_contribution_stats_is_in_default_state()
        # Change the question_dict of the question suggestion that got rejected
        # so we can resubmit the suggestion for review.
        resubmit_question_change = question_suggestion.change
        resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1']
        # Resubmit the rejected question suggestion.
        suggestion_services.resubmit_rejected_suggestion(
            question_suggestion.suggestion_id, 'resubmit summary message',
            self.author_id, resubmit_question_change
        )
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_create_translation_suggestion_raises_translation_suggestion_count(
            self):
        """Creating a translation suggestion bumps the per-language
        translation suggestion count.
        """
        self._create_translation_suggestion_with_language_code(
            self.language_code)
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {self.language_code: 1})
def test_create_translation_suggestions_diff_lang_raises_translation_counts(
        self):
    """Suggestions in different languages should each get their own entry
    in the per-language suggestion counts.
    """
    for lang_code in ('hi', 'en'):
        self._create_translation_suggestion_with_language_code(lang_code)

    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(0, stats.question_reviewer_count)
    self.assertEqual(0, stats.question_suggestion_count)
    self.assertDictEqual(
        {}, stats.translation_reviewer_counts_by_lang_code)
    self.assertDictEqual(
        {'hi': 1, 'en': 1},
        stats.translation_suggestion_counts_by_lang_code)
def test_create_translation_suggestions_eq_lang_increases_translation_count(
        self):
    """Two suggestions in the same language should be aggregated into a
    single per-language count of two.
    """
    for _ in range(2):
        self._create_translation_suggestion_with_language_code('hi')

    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(0, stats.question_reviewer_count)
    self.assertEqual(0, stats.question_suggestion_count)
    self.assertDictEqual(
        {}, stats.translation_reviewer_counts_by_lang_code)
    self.assertDictEqual(
        {'hi': 2}, stats.translation_suggestion_counts_by_lang_code)
def test_accept_translation_suggestion_lowers_translation_suggestion_count(
        self):
    """Accepting a translation suggestion should remove it from the
    community contribution stats.
    """
    translation_suggestion = (
        self._create_translation_suggestion_with_language_code(
            self.language_code))

    # Assert that the translation suggestion count increased.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(stats.question_reviewer_count, 0)
    self.assertEqual(stats.question_suggestion_count, 0)
    self.assertDictEqual(
        stats.translation_reviewer_counts_by_lang_code, {})
    self.assertDictEqual(
        stats.translation_suggestion_counts_by_lang_code,
        {self.language_code: 1})

    suggestion_services.accept_suggestion(
        translation_suggestion.suggestion_id, self.reviewer_id,
        self.COMMIT_MESSAGE, 'review message')

    # Acceptance takes the suggestion out of review, so the stats should
    # return to their default (all-zero) state.
    self._assert_community_contribution_stats_is_in_default_state()
def test_reject_translation_suggestion_lowers_translation_suggestion_count(
        self):
    """Rejecting the only translation suggestion should return the
    community contribution stats to their default state.
    """
    translation_suggestion = (
        self._create_translation_suggestion_with_language_code(
            self.language_code))

    # Creating the suggestion bumps the per-language suggestion count.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(0, stats.question_reviewer_count)
    self.assertEqual(0, stats.question_suggestion_count)
    self.assertDictEqual(
        {}, stats.translation_reviewer_counts_by_lang_code)
    self.assertDictEqual(
        {self.language_code: 1},
        stats.translation_suggestion_counts_by_lang_code)

    suggestion_services.reject_suggestion(
        translation_suggestion.suggestion_id, self.reviewer_id,
        'review message')

    # Rejection removes the suggestion from the counts again.
    self._assert_community_contribution_stats_is_in_default_state()
def test_reject_one_translation_suggestion_diff_lang_lowers_only_one_count(
        self):
    """Rejecting a suggestion in one language should only decrement that
    language's count, leaving the other language's count untouched.
    """
    translation_suggestion_1 = (
        self._create_translation_suggestion_with_language_code('hi'))
    # Create a translation suggestion in a different language that won't be
    # rejected.
    self._create_translation_suggestion_with_language_code('en')

    # Assert that the translation suggestion count increased.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(stats.question_reviewer_count, 0)
    self.assertEqual(stats.question_suggestion_count, 0)
    self.assertDictEqual(
        stats.translation_reviewer_counts_by_lang_code, {})
    self.assertDictEqual(
        stats.translation_suggestion_counts_by_lang_code,
        {'hi': 1, 'en': 1})

    suggestion_services.reject_suggestion(
        translation_suggestion_1.suggestion_id, self.reviewer_id,
        'review message')

    # Only the rejected 'hi' suggestion should be removed from the counts;
    # the 'en' suggestion is still waiting for review.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(stats.question_reviewer_count, 0)
    self.assertEqual(stats.question_suggestion_count, 0)
    self.assertDictEqual(
        stats.translation_reviewer_counts_by_lang_code, {})
    self.assertDictEqual(
        stats.translation_suggestion_counts_by_lang_code, {'en': 1})
def test_reject_translation_suggestions_diff_lang_lowers_translation_count(
        self):
    """Bulk-rejecting suggestions in different languages should clear all
    of the per-language suggestion counts.
    """
    suggestion_ids = [
        self._create_translation_suggestion_with_language_code(
            lang_code).suggestion_id
        for lang_code in ('hi', 'en')
    ]

    # Both languages should now be represented in the counts.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(0, stats.question_reviewer_count)
    self.assertEqual(0, stats.question_suggestion_count)
    self.assertDictEqual(
        {}, stats.translation_reviewer_counts_by_lang_code)
    self.assertDictEqual(
        {'hi': 1, 'en': 1},
        stats.translation_suggestion_counts_by_lang_code)

    suggestion_services.reject_suggestions(
        suggestion_ids, self.reviewer_id, 'review message')

    # Rejecting every outstanding suggestion empties the stats again.
    self._assert_community_contribution_stats_is_in_default_state()
def test_reject_translation_suggestions_same_lang_lowers_translation_count(
        self):
    """Bulk-rejecting both suggestions in the same language should clear
    that language's suggestion count entirely.
    """
    translation_suggestion_1 = (
        self._create_translation_suggestion_with_language_code(
            self.language_code))
    translation_suggestion_2 = (
        self._create_translation_suggestion_with_language_code(
            self.language_code))

    # Assert that the translation suggestion count increased.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(stats.question_reviewer_count, 0)
    self.assertEqual(stats.question_suggestion_count, 0)
    self.assertDictEqual(
        stats.translation_reviewer_counts_by_lang_code, {})
    self.assertDictEqual(
        stats.translation_suggestion_counts_by_lang_code,
        {self.language_code: 2})

    suggestion_services.reject_suggestions(
        [
            translation_suggestion_1.suggestion_id,
            translation_suggestion_2.suggestion_id
        ], self.reviewer_id, 'review message')

    # Rejecting both suggestions returns the stats to the default state.
    self._assert_community_contribution_stats_is_in_default_state()
def test_reject_suggestions_diff_type_decreases_suggestion_counts(self):
    """Bulk-rejecting suggestions of multiple types should decrement each
    of the corresponding stats counts.
    """
    suggestion_1 = (
        self._create_translation_suggestion_with_language_code('hi'))
    suggestion_2 = (
        self._create_translation_suggestion_with_language_code('en'))
    suggestion_3 = self._create_edit_state_content_suggestion()
    suggestion_4 = self._create_question_suggestion()

    # Assert that the suggestion counts increased. Note that the edit
    # state content suggestion (suggestion_3) is not reflected here: the
    # community contribution stats only track question and translation
    # suggestions.
    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(stats.question_reviewer_count, 0)
    self.assertEqual(stats.question_suggestion_count, 1)
    self.assertDictEqual(
        stats.translation_reviewer_counts_by_lang_code, {})
    self.assertDictEqual(
        stats.translation_suggestion_counts_by_lang_code,
        {'hi': 1, 'en': 1})

    suggestion_services.reject_suggestions(
        [
            suggestion_1.suggestion_id, suggestion_2.suggestion_id,
            suggestion_3.suggestion_id, suggestion_4.suggestion_id
        ], self.reviewer_id, 'review message')

    # Rejecting everything returns the stats to the default state.
    self._assert_community_contribution_stats_is_in_default_state()
def test_create_suggestions_diff_type_increases_suggestion_counts(self):
    """Creating suggestions of different types should increase each of the
    corresponding suggestion counts independently.
    """
    for lang_code in ('hi', 'en'):
        self._create_translation_suggestion_with_language_code(lang_code)
    self._create_question_suggestion()

    stats = suggestion_services.get_community_contribution_stats()
    self.assertEqual(0, stats.question_reviewer_count)
    self.assertEqual(1, stats.question_suggestion_count)
    self.assertDictEqual(
        {}, stats.translation_reviewer_counts_by_lang_code)
    self.assertDictEqual(
        {'hi': 1, 'en': 1},
        stats.translation_suggestion_counts_by_lang_code)
class GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests(
        test_utils.GenericTestBase):
    """Test the ability of the
    get_info_about_suggestions_waiting_too_long_for_review method in suggestion
    services, which is used to retrieve the information required to notify
    admins if there are suggestions that have waited longer than
    suggestion_models.SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS days for a
    review on the Contributor Dashboard.
    """

    # Exploration targeted by translation suggestions.
    target_id = 'exp1'
    # Skill targeted by question suggestions.
    skill_id = 'skill_123456'
    language_code = 'en'
    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_1_EMAIL = 'reviewer1@community.org'
    REVIEWER_2_EMAIL = 'reviewer2@community.org'
    COMMIT_MESSAGE = 'commit message'
    # Fixed "now" used to make review wait times deterministic in tests.
    mocked_datetime_utcnow = datetime.datetime(2020, 6, 15, 5)

    def _create_translation_suggestion(self):
        """Creates a translation suggestion."""
        add_translation_change_dict = {
            'cmd': exp_domain.CMD_ADD_TRANSLATION,
            'state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
            'language_code': self.language_code,
            'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
            'translation_html': '<p>This is the translated content.</p>'
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_translation_change_dict,
            'test description'
        )

    def _create_question_suggestion(self):
        """Creates a question suggestion."""
        add_question_change_dict = {
            'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': self.language_code,
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': self.skill_id,
            'skill_difficulty': 0.3
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_question_change_dict,
            'test description'
        )

    def _create_reviewable_suggestion_email_infos_from_suggestions(
            self, suggestions):
        """Creates a list of ReviewableSuggestionEmailInfo objects from
        the given suggestions.
        """
        return [
            (
                suggestion_services
                .create_reviewable_suggestion_email_info_from_suggestion(
                    suggestion)
            ) for suggestion in suggestions
        ]

    def _assert_reviewable_suggestion_email_infos_are_in_correct_order(
            self, reviewable_suggestion_email_infos,
            expected_reviewable_suggestion_email_infos):
        """Asserts that the reviewable suggestion email infos are equal to the
        expected reviewable suggestion email infos and that the reviewable
        suggestion email infos are sorted in descending order according to
        review wait time (i.e. in ascending order of submission datetime:
        the suggestion that has waited longest comes first).
        """
        self.assertEqual(
            len(reviewable_suggestion_email_infos),
            len(expected_reviewable_suggestion_email_infos)
        )
        # Compare each info field-by-field against its expected counterpart.
        for index, reviewable_suggestion_email_info in enumerate(
                reviewable_suggestion_email_infos):
            self.assertEqual(
                reviewable_suggestion_email_info.suggestion_type,
                expected_reviewable_suggestion_email_infos[
                    index].suggestion_type)
            self.assertEqual(
                reviewable_suggestion_email_info.language_code,
                expected_reviewable_suggestion_email_infos[
                    index].language_code)
            self.assertEqual(
                reviewable_suggestion_email_info.suggestion_content,
                expected_reviewable_suggestion_email_infos[
                    index].suggestion_content)
            self.assertEqual(
                reviewable_suggestion_email_info.submission_datetime,
                expected_reviewable_suggestion_email_infos[
                    index].submission_datetime)
        # Verify the ordering: earlier submissions (longer waits) first.
        for index in python_utils.RANGE(
                len(reviewable_suggestion_email_infos) - 1):
            self.assertLess(
                reviewable_suggestion_email_infos[index].submission_datetime,
                reviewable_suggestion_email_infos[
                    index + 1].submission_datetime
            )

    def setUp(self):
        super(
            GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests,
            self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_1_EMAIL, 'reviewer1')
        self.reviewer_1_id = self.get_user_id_from_email(
            self.REVIEWER_1_EMAIL)
        self.signup(self.REVIEWER_2_EMAIL, 'reviewer2')
        self.reviewer_2_id = self.get_user_id_from_email(
            self.REVIEWER_2_EMAIL)
        # Targets that the suggestions created in these tests point at.
        self.save_new_valid_exploration(self.target_id, self.author_id)
        self.save_new_skill(self.skill_id, self.author_id)

    def test_get_returns_empty_for_suggestion_type_not_on_contributor_dashboard(
            self):
        self._create_translation_suggestion()
        # This mocked list cannot be empty because then the storage query in
        # the get_suggestions_waiting_too_long_for_review method will fail.
        mocked_contributor_dashboard_suggestion_types = [
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION]

        with self.swap(
            suggestion_models, 'CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES',
            mocked_contributor_dashboard_suggestion_types):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        # The only existing suggestion is a translation, which is not in the
        # mocked dashboard types, so nothing should be reported.
        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestion_review_wait_time_diff_is_negative(
            self):
        self._create_translation_suggestion()

        # Make sure the threshold is nonzero.
        with self.swap(
            suggestion_models,
            'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 1):
            info_about_suggestions_waiting_too_long_for_review = (
                suggestion_services
                .get_info_about_suggestions_waiting_too_long_for_review()
            )

        # A freshly created suggestion has waited less than the threshold.
        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestions_have_waited_less_than_threshold(
            self):
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            self._create_translation_suggestion()
            self._create_question_suggestion()
        mocked_threshold_review_wait_time_in_days = 2
        # Advance "now" by 1 day: both suggestions have waited 1 < 2 days.
        mocked_datetime_less_than_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=1))

        with self.mock_datetime_utcnow(
            mocked_datetime_less_than_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestions_have_waited_threshold_review_time(
            self):
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            self._create_translation_suggestion()
        mocked_threshold_review_wait_time_in_days = 2
        # Advance "now" by exactly the threshold: waiting exactly the
        # threshold is not considered "too long".
        mocked_datetime_eq_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(
                days=mocked_threshold_review_wait_time_in_days))

        with self.mock_datetime_utcnow(
            mocked_datetime_eq_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_suggestion_waited_long_if_their_wait_is_past_threshold(
            self):
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            translation_suggestion = self._create_translation_suggestion()
        # Give the question suggestion a slightly different review submission
        # time so that the suggestions are not indistinguishable, in terms of
        # their review submission time.
        with self.mock_datetime_utcnow(
            self.mocked_datetime_utcnow + datetime.timedelta(minutes=5)):
            question_suggestion = self._create_question_suggestion()
        expected_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion, question_suggestion]))
        mocked_threshold_review_wait_time_in_days = 1
        # Advance "now" by 2 days so both suggestions exceed the 1-day
        # threshold.
        mocked_datetime_past_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=2))

        with self.mock_datetime_utcnow(
            mocked_datetime_past_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 2)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            info_about_suggestions_waiting_too_long_for_review,
            expected_suggestion_email_infos
        )

    def test_get_only_returns_suggestions_that_have_waited_past_wait_threshold(
            self):
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            translation_suggestion = self._create_translation_suggestion()
        with self.mock_datetime_utcnow(
            self.mocked_datetime_utcnow + datetime.timedelta(days=2)):
            self._create_question_suggestion()
        expected_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion]))
        mocked_threshold_review_wait_time_in_days = 3
        mocked_datetime_past_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=4))

        with self.mock_datetime_utcnow(
            mocked_datetime_past_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        # The question suggestion was created 2 days after the translation
        # suggestion, so it has only waited 1 day for a review, which is less
        # than 3, the mocked review wait time threshold. Therefore, only the
        # translation suggestion has waited too long for review.
        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            info_about_suggestions_waiting_too_long_for_review,
            expected_suggestion_email_infos
        )
class GetSuggestionTypesThatNeedReviewersUnitTests(test_utils.GenericTestBase):
    """Tests for the get_suggestion_types_that_need_reviewers method."""

    sample_language_code = 'en'
    # Exploration targeted by translation suggestions.
    target_id = 'exp1'
    # Skill targeted by question suggestions.
    skill_id = 'skill_123456'
    language_code = 'en'
    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_EMAIL = 'reviewer@community.org'

    def _create_translation_suggestion_with_language_code(self, language_code):
        """Creates a translation suggestion in the given language_code."""
        add_translation_change_dict = {
            'cmd': exp_domain.CMD_ADD_TRANSLATION,
            'state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
            'language_code': language_code,
            'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
            'translation_html': '<p>This is the translated content.</p>'
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_translation_change_dict,
            'test description'
        )

    def _create_question_suggestion(self):
        """Creates a question suggestion."""
        add_question_change_dict = {
            'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': constants.DEFAULT_LANGUAGE_CODE,
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': self.skill_id,
            'skill_difficulty': 0.3
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_question_change_dict,
            'test description'
        )

    def _assert_community_contribution_stats_is_in_default_state(self):
        """Checks if the community contribution stats is in its default
        state (no reviewers and no suggestions counted).
        """
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats())

        self.assertEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ), {})
        self.assertEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ), {})
        self.assertEqual(
            community_contribution_stats.question_reviewer_count, 0)
        self.assertEqual(
            community_contribution_stats.question_suggestion_count, 0)

    def setUp(self):
        super(
            GetSuggestionTypesThatNeedReviewersUnitTests,
            self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        # Targets that the suggestions created in these tests point at.
        self.save_new_valid_exploration(self.target_id, self.author_id)
        self.save_new_skill(self.skill_id, self.author_id)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(
            self.REVIEWER_EMAIL)

    def test_get_returns_no_reviewers_needed_if_no_suggestions_exist(self):
        self._assert_community_contribution_stats_is_in_default_state()

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        self.assertDictEqual(suggestion_types_needing_reviewers, {})

    def test_get_returns_no_reviewers_needed_if_question_reviewer_no_question(
            self):
        user_services.allow_user_to_review_question(self.reviewer_id)
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 1)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        # A reviewer without any pending suggestions means no additional
        # reviewers are needed.
        self.assertDictEqual(suggestion_types_needing_reviewers, {})

    def test_get_returns_not_needed_if_translation_reviewers_but_no_translation(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_id, 'en')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_id, 'fr')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {'en': 1, 'fr': 1})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        self.assertDictEqual(suggestion_types_needing_reviewers, {})

    def test_get_returns_no_reviewers_needed_if_enough_translation_reviewers(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_id, 'en')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_id, 'fr')
        self._create_translation_suggestion_with_language_code('en')
        self._create_translation_suggestion_with_language_code('fr')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {'en': 1, 'fr': 1})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {
                'en': 1, 'fr': 1})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        # Each language with pending translations already has a reviewer.
        self.assertDictEqual(suggestion_types_needing_reviewers, {})

    def test_get_returns_no_reviewers_needed_if_enough_question_reviewers(
            self):
        user_services.allow_user_to_review_question(self.reviewer_id)
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 1)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        self.assertDictEqual(suggestion_types_needing_reviewers, {})

    def test_get_returns_reviewers_needed_if_question_but_no_reviewers(
            self):
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        # Question suggestions map to an empty set (no per-language detail).
        self.assertDictEqual(
            suggestion_types_needing_reviewers,
            {suggestion_models.SUGGESTION_TYPE_ADD_QUESTION: {}})

    def test_get_returns_reviewers_needed_if_translation_for_a_lang_no_reviewer(
            self):
        self._create_translation_suggestion_with_language_code(
            self.sample_language_code)
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {
                self.sample_language_code: 1})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        # Translation suggestions map to the set of language codes that
        # need more reviewers.
        self.assertDictEqual(
            suggestion_types_needing_reviewers,
            {suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT: {
                self.sample_language_code}})

    def test_get_returns_reviewers_needed_if_translation_for_langs_no_reviewers(
            self):
        self._create_translation_suggestion_with_language_code('en')
        self._create_translation_suggestion_with_language_code('fr')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {
                'en': 1, 'fr': 1})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        self.assertDictEqual(
            suggestion_types_needing_reviewers,
            {suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT: {'en', 'fr'}})

    def test_get_returns_reviewers_needed_if_multi_suggestion_types_no_reviewer(
            self):
        self._create_question_suggestion()
        self._create_translation_suggestion_with_language_code('en')
        self._create_translation_suggestion_with_language_code('fr')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {'en': 1, 'fr': 1})

        suggestion_types_needing_reviewers = (
            suggestion_services.get_suggestion_types_that_need_reviewers())

        # Both suggestion types lack reviewers, so both appear in the result.
        self.assertDictEqual(
            suggestion_types_needing_reviewers,
            {
                suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT: {
                    'en', 'fr'},
                suggestion_models.SUGGESTION_TYPE_ADD_QUESTION: {}
            })
| 44.331808
| 126
| 0.676704
| 18,514
| 174,357
| 5.897807
| 0.032624
| 0.039838
| 0.065481
| 0.045608
| 0.875788
| 0.848789
| 0.822038
| 0.80254
| 0.78731
| 0.767355
| 0
| 0.006318
| 0.257472
| 174,357
| 3,932
| 127
| 44.343082
| 0.837091
| 0.072805
| 0
| 0.736427
| 0
| 0.001894
| 0.079003
| 0.023691
| 0
| 0
| 0
| 0.000254
| 0.114899
| 1
| 0.054293
| false
| 0.000947
| 0.007891
| 0.000316
| 0.094066
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077dfbe10301b4060c770e725c7c1a699cb212d3
| 72,401
|
py
|
Python
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 1
|
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import map_error
from .. import models
class ShareOperations(object):
"""ShareOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar restype: . Constant value: "share".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
self.restype = "share"
def create(self, timeout=None, metadata=None, quota=None, access_tier=None, enabled_protocols=None, root_squash=None, cls=None, **kwargs):
    """Creates a new share under the specified account. If the share with the
    same name already exists, the operation fails.

    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param metadata: A name-value pair to associate with a file storage
     object.
    :type metadata: str
    :param quota: Specifies the maximum size of the share, in gigabytes.
    :type quota: int
    :param access_tier: Specifies the access tier of the share. Possible
     values include: 'TransactionOptimized', 'Hot', 'Cool'
    :type access_tier: str or
     ~azure.storage.fileshare.models.ShareAccessTier
    :param enabled_protocols: Protocols to enable on the share.
    :type enabled_protocols: str
    :param root_squash: Root squash to set on the share. Only valid for
     NFS shares. Possible values include: 'NoRootSquash', 'RootSquash',
     'AllSquash'
    :type root_squash: str or
     ~azure.storage.fileshare.models.ShareRootSquash
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    # Optional caller-provided mapping of status codes to exceptions.
    error_map = kwargs.pop('error_map', None)

    # Construct URL from the operation's URL template and the account URL.
    url = self.create.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters. 'restype=share' marks this as a share-level
    # operation; optional parameters are only serialized when provided.
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Construct headers; optional settings become x-ms-* headers.
    header_parameters = {}
    if metadata is not None:
        header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
    if quota is not None:
        header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
    if access_tier is not None:
        header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if enabled_protocols is not None:
        header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str')
    if root_squash is not None:
        header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'ShareRootSquash')

    # Construct and send request through the client pipeline (synchronous).
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 201 Created is the only success status for this operation.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        # Deserialize the interesting response headers and hand them, with
        # the raw response, to the caller-supplied callback.
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
# URL template used by create(); '{shareName}' is filled by the client.
create.metadata = {'url': '/{shareName}'}
def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
    """Return all user-defined metadata and system properties for the
    specified share or share snapshot. The returned data does not include
    the share's list of files.

    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param lease_access_conditions: Additional parameters for the
     operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Flatten the optional lease access-condition group into a plain value.
    lease_id = lease_access_conditions.lease_id if lease_access_conditions is not None else None

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.get_properties.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if lease_id is not None:
        headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Dispatch the GET through the client pipeline.
    request = self._client.get(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        # Deserialize the documented response headers for the caller.
        parsed_headers = {
            'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')),
            'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')),
            'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')),
            'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')),
            'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')),
            'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
            'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
            'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
            'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')),
            'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')),
            'x-ms-access-tier-transition-state': self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')),
            'x-ms-enabled-protocols': self._deserialize('str', response.headers.get('x-ms-enabled-protocols')),
            'x-ms-root-squash': self._deserialize(models.ShareRootSquash, response.headers.get('x-ms-root-squash')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
get_properties.metadata = {'url': '/{shareName}'}
def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, lease_access_conditions=None, cls=None, **kwargs):
    """Mark the specified share or share snapshot for deletion. The share
    or share snapshot, and any files contained within it, are later
    deleted during garbage collection.

    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param delete_snapshots: Specifies the option include to delete the
     base share and all of its snapshots. Possible values include:
     'include', 'include-leased'
    :type delete_snapshots: str or
     ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
    :param lease_access_conditions: Additional parameters for the
     operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Flatten the optional lease access-condition group into a plain value.
    lease_id = lease_access_conditions.lease_id if lease_access_conditions is not None else None

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.delete.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if delete_snapshots is not None:
        headers['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
    if lease_id is not None:
        headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Dispatch the DELETE through the client pipeline.
    request = self._client.delete(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service acknowledges the deletion with 202 Accepted.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
delete.metadata = {'url': '/{shareName}'}
def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs):
    """Acquire a lease on a share (or the specified snapshot), establishing
    a lock used by set and delete share operations.

    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param duration: Specifies the duration of the lease, in seconds, or
     negative one (-1) for a lease that never expires. A non-infinite lease
     can be between 15 and 60 seconds. A lease duration cannot be changed
     using renew or change.
    :type duration: int
    :param proposed_lease_id: Proposed lease ID, in a GUID string format.
     The File service returns 400 (Invalid request) if the proposed lease
     ID is not in the correct format. See Guid Constructor (String) for a
     list of valid GUID string formats.
    :type proposed_lease_id: str
    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Constants identifying this operation to the service.
    lease_comp = "lease"
    lease_action = "acquire"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.acquire_lease.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    params['comp'] = self._serialize.query("comp", lease_comp, 'str')
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    if duration is not None:
        headers['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
    if proposed_lease_id is not None:
        headers['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    headers['x-ms-lease-action'] = self._serialize.header("action", lease_action, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful acquire returns 201 Created.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
acquire_lease.metadata = {'url': '/{shareName}'}
def release_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs):
    """Release the lease held on a share (or the specified snapshot),
    freeing the lock used by set and delete share operations.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Constants identifying this operation to the service.
    lease_comp = "lease"
    lease_action = "release"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.release_lease.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    params['comp'] = self._serialize.query("comp", lease_comp, 'str')
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    headers['x-ms-lease-action'] = self._serialize.header("action", lease_action, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
release_lease.metadata = {'url': '/{shareName}'}
def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs):
    """Change the ID of an active lease on a share (or the specified
    snapshot), part of the lock management used by set and delete share
    operations.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param proposed_lease_id: Proposed lease ID, in a GUID string format.
     The File service returns 400 (Invalid request) if the proposed lease
     ID is not in the correct format. See Guid Constructor (String) for a
     list of valid GUID string formats.
    :type proposed_lease_id: str
    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Constants identifying this operation to the service.
    lease_comp = "lease"
    lease_action = "change"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.change_lease.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    params['comp'] = self._serialize.query("comp", lease_comp, 'str')
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    if proposed_lease_id is not None:
        headers['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    headers['x-ms-lease-action'] = self._serialize.header("action", lease_action, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
change_lease.metadata = {'url': '/{shareName}'}
def renew_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs):
    """Renew an active lease on a share (or the specified snapshot),
    part of the lock management used by set and delete share operations.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Constants identifying this operation to the service.
    lease_comp = "lease"
    lease_action = "renew"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.renew_lease.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    params['comp'] = self._serialize.query("comp", lease_comp, 'str')
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    headers['x-ms-lease-action'] = self._serialize.header("action", lease_action, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
renew_lease.metadata = {'url': '/{shareName}'}
def break_lease(self, timeout=None, break_period=None, request_id=None, sharesnapshot=None, lease_access_conditions=None, cls=None, **kwargs):
    """Break an active lease on a share (or the specified snapshot),
    part of the lock management used by set and delete share operations.

    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param break_period: For a break operation, proposed duration the
     lease should continue before it is broken, in seconds, between 0 and
     60. This break period is only used if it is shorter than the time
     remaining on the lease. If longer, the time remaining on the lease is
     used. A new lease will not be available before the break period has
     expired, but the lease may be held for longer than the break period.
     If this header does not appear with a break operation, a
     fixed-duration lease breaks after the remaining lease period elapses,
     and an infinite lease breaks immediately.
    :type break_period: int
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param sharesnapshot: The snapshot parameter is an opaque DateTime
     value that, when present, specifies the share snapshot to query.
    :type sharesnapshot: str
    :param lease_access_conditions: Additional parameters for the
     operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Flatten the optional lease access-condition group into a plain value.
    lease_id = lease_access_conditions.lease_id if lease_access_conditions is not None else None
    # Constants identifying this operation to the service.
    lease_comp = "lease"
    lease_action = "break"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.break_lease.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    if sharesnapshot is not None:
        params['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
    params['comp'] = self._serialize.query("comp", lease_comp, 'str')
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')

    # Request headers.
    headers = {}
    if break_period is not None:
        headers['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    headers['x-ms-lease-action'] = self._serialize.header("action", lease_action, 'str')
    if lease_id is not None:
        headers['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful break returns 202 Accepted.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
            'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
break_lease.metadata = {'url': '/{shareName}'}
def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs):
    """Create a read-only snapshot of a share.

    :param timeout: The timeout parameter is expressed in seconds. For
     more information, see <a
     href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
     Timeouts for File Service Operations.</a>
    :type timeout: int
    :param metadata: A name-value pair to associate with a file storage
     object.
    :type metadata: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    err_map = kwargs.pop('error_map', None)
    # Constant identifying this operation to the service.
    snapshot_comp = "snapshot"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.create_snapshot.metadata['url'],
        url=self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True))

    # Query-string parameters (order preserved for the outgoing URL).
    params = {}
    if timeout is not None:
        params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    params['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    params['comp'] = self._serialize.query("comp", snapshot_comp, 'str')

    # Request headers.
    headers = {}
    if metadata is not None:
        headers['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')

    # Dispatch the PUT through the client pipeline.
    request = self._client.put(url, params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful snapshot creation returns 201 Created.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        parsed_headers = {
            'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, parsed_headers)
create_snapshot.metadata = {'url': '/{shareName}'}
def create_permission(self, share_permission, timeout=None, cls=None, **kwargs):
    """Create a permission (a security descriptor) at the share level.

    :param share_permission: A permission (a security descriptor) at the
     share level.
    :type share_permission: ~azure.storage.fileshare.models.SharePermission
    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    comp = "filepermission"

    # Construct URL
    url = self.create_permission.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')

    # Construct body — the permission document is JSON, not XML like most
    # File service payloads.
    body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False)

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        response_headers = {
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
create_permission.metadata = {'url': '/{shareName}'}
def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs):
    """Returns the permission (security descriptor) for a given key.

    :param file_permission_key: Key of the permission to be set for the
     directory/file.
    :type file_permission_key: str
    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: SharePermission or the result of cls(response)
    :rtype: ~azure.storage.fileshare.models.SharePermission
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    comp = "filepermission"

    # Construct URL
    url = self.get_permission.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers — this operation returns JSON, unlike most File
    # service operations which use XML.
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    header_dict = {}
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SharePermission', response)
        header_dict = {
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
    if cls:
        return cls(response, deserialized, header_dict)
    return deserialized
get_permission.metadata = {'url': '/{shareName}'}
def set_properties(self, timeout=None, quota=None, access_tier=None, root_squash=None, lease_access_conditions=None, cls=None, **kwargs):
    """Sets properties for the specified share.

    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param quota: Specifies the maximum size of the share, in gigabytes.
    :type quota: int
    :param access_tier: Specifies the access tier of the share. Possible
     values include: 'TransactionOptimized', 'Hot', 'Cool'
    :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
    :param root_squash: Root squash to set on the share. Only valid for
     NFS shares. Possible values include: 'NoRootSquash', 'RootSquash',
     'AllSquash'
    :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
    :param lease_access_conditions: Additional parameters for the operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional lease-access-conditions parameter group.
    lease_id = None
    if lease_access_conditions is not None:
        lease_id = lease_access_conditions.lease_id
    comp = "properties"

    # Construct URL
    url = self.set_properties.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if quota is not None:
        header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
    if access_tier is not None:
        header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str')
    if root_squash is not None:
        header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'ShareRootSquash')
    if lease_id is not None:
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
set_properties.metadata = {'url': '/{shareName}'}
def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs):
    """Sets one or more user-defined name-value pairs for the specified share.

    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param metadata: A name-value pair to associate with a file storage
     object.
    :type metadata: str
    :param lease_access_conditions: Additional parameters for the operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional lease-access-conditions parameter group.
    lease_id = None
    if lease_access_conditions is not None:
        lease_id = lease_access_conditions.lease_id
    comp = "metadata"

    # Construct URL
    url = self.set_metadata.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    if metadata is not None:
        header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if lease_id is not None:
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
set_metadata.metadata = {'url': '/{shareName}'}
def get_access_policy(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
    """Returns information about stored access policies specified on the
    share.

    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param lease_access_conditions: Additional parameters for the operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: list or the result of cls(response)
    :rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional lease-access-conditions parameter group.
    lease_id = None
    if lease_access_conditions is not None:
        lease_id = lease_access_conditions.lease_id
    comp = "acl"

    # Construct URL
    url = self.get_access_policy.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/xml'
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if lease_id is not None:
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    header_dict = {}
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[SignedIdentifier]', response)
        header_dict = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
    if cls:
        return cls(response, deserialized, header_dict)
    return deserialized
get_access_policy.metadata = {'url': '/{shareName}'}
def set_access_policy(self, share_acl=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
    """Sets a stored access policy for use with shared access signatures.

    :param share_acl: The ACL for the share.
    :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier]
    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param lease_access_conditions: Additional parameters for the operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional lease-access-conditions parameter group.
    lease_id = None
    if lease_access_conditions is not None:
        lease_id = lease_access_conditions.lease_id
    comp = "acl"

    # Construct URL
    url = self.set_access_policy.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if lease_id is not None:
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Construct body — the list is wrapped in a <SignedIdentifiers> XML
    # element; an absent ACL sends an empty body.
    serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}}
    if share_acl is not None:
        body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
    else:
        body_content = None

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
set_access_policy.metadata = {'url': '/{shareName}'}
def get_statistics(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs):
    """Retrieves statistics related to the share.

    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param lease_access_conditions: Additional parameters for the operation
    :type lease_access_conditions:
     ~azure.storage.fileshare.models.LeaseAccessConditions
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: ShareStats or the result of cls(response)
    :rtype: ~azure.storage.fileshare.models.ShareStats
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional lease-access-conditions parameter group.
    lease_id = None
    if lease_access_conditions is not None:
        lease_id = lease_access_conditions.lease_id
    comp = "stats"

    # Construct URL
    url = self.get_statistics.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/xml'
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if lease_id is not None:
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    header_dict = {}
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ShareStats', response)
        header_dict = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
    if cls:
        return cls(response, deserialized, header_dict)
    return deserialized
get_statistics.metadata = {'url': '/{shareName}'}
def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, cls=None, **kwargs):
    """Restores a previously deleted Share.

    :param timeout: The timeout parameter is expressed in seconds.
    :type timeout: int
    :param request_id: Provides a client-generated, opaque value with a 1
     KB character limit that is recorded in the analytics logs when storage
     analytics logging is enabled.
    :type request_id: str
    :param deleted_share_name: Specifies the name of the
     previously-deleted share.
    :type deleted_share_name: str
    :param deleted_share_version: Specifies the version of the
     previously-deleted share.
    :type deleted_share_version: str
    :param callable cls: A custom type or function that will be passed the
     direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
     :class:`StorageErrorException<azure.storage.fileshare.models.StorageErrorException>`
    """
    error_map = kwargs.pop('error_map', None)
    comp = "undelete"

    # Construct URL
    url = self.restore.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    if deleted_share_name is not None:
        header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str')
    if deleted_share_version is not None:
        header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str')

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
restore.metadata = {'url': '/{shareName}'}
| 53.590674
| 164
| 0.655488
| 8,580
| 72,401
| 5.382284
| 0.043473
| 0.013837
| 0.048333
| 0.045604
| 0.907341
| 0.884409
| 0.86767
| 0.865613
| 0.855197
| 0.850433
| 0
| 0.004175
| 0.225784
| 72,401
| 1,350
| 165
| 53.63037
| 0.819674
| 0.284181
| 0
| 0.791255
| 0
| 0
| 0.160802
| 0.023028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025388
| false
| 0
| 0.002821
| 0
| 0.059238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
079b8f98b49a5182fb021080823718b367bdc393
| 5,390
|
py
|
Python
|
tests/test_parser_delete_extracts.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 11
|
2020-09-02T03:41:01.000Z
|
2022-01-20T12:38:20.000Z
|
tests/test_parser_delete_extracts.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 19
|
2020-09-03T04:54:47.000Z
|
2022-01-31T17:41:19.000Z
|
tests/test_parser_delete_extracts.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 6
|
2020-11-21T15:45:51.000Z
|
2022-01-24T12:26:20.000Z
|
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from pythontabcmd2.parsers.delete_extracts_parser import DeleteExtractsParser
class DeleteExtractsParserTest(unittest.TestCase):
    """Unit tests for ``DeleteExtractsParser.delete_extracts_parser``.

    Each test patches ``argparse.ArgumentParser.parse_args`` to return a
    canned :class:`argparse.Namespace`, so no real command line is parsed.
    """

    @mock.patch('argparse.ArgumentParser.parse_args',
                return_value=argparse.Namespace(datasource="testproject",
                                                parent_project_path="abcdef",
                                                embedded_datasources='desc',
                                                url="1234",
                                                encrypt=None,
                                                project="test123",
                                                workbook="workbooktest",
                                                include_all="test",
                                                username="helloworld",
                                                site="",
                                                logging_level="info",
                                                password="testing123",
                                                no_prompt=True, token=None,
                                                token_name=None,
                                                cookie=True,
                                                no_cookie=False,
                                                prompt=False,
                                                ))
    def test_delete_extract_parser_optional_arguments(self, mock_args):
        # The parser should pass through every attribute of the mocked
        # Namespace unchanged.
        args = DeleteExtractsParser.delete_extracts_parser()
        assert args == argparse.Namespace(datasource="testproject",
                                          parent_project_path="abcdef",
                                          embedded_datasources='desc',
                                          url="1234",
                                          encrypt=None,
                                          project="test123",
                                          workbook="workbooktest",
                                          include_all="test",
                                          username="helloworld",
                                          site="",
                                          logging_level="info",
                                          password="testing123",
                                          no_prompt=True, token=None,
                                          token_name=None,
                                          cookie=True,
                                          no_cookie=False,
                                          prompt=False)

    @mock.patch('argparse.ArgumentParser.parse_args',
                return_value=argparse.Namespace())
    def test_delete_extract_parser_missing_all_args(self, mock_args):
        # An empty Namespace lacks the attributes the parser reads, so an
        # AttributeError is expected.
        with self.assertRaises(AttributeError):
            args = DeleteExtractsParser.delete_extracts_parser()

    @mock.patch('argparse.ArgumentParser.parse_args',
                return_value=argparse.Namespace(datasource="testproject",
                                                embedded_datasources='desc',
                                                url="1234",
                                                encrypt=None,
                                                project="test123",
                                                workbook="workbooktest",
                                                include_all="test",
                                                username="helloworld",
                                                site="",
                                                logging_level="info",
                                                password="testing123",
                                                no_prompt=True, token=None,
                                                token_name=None,
                                                cookie=True,
                                                no_cookie=False,
                                                prompt=False,
                                                ))
    def test_delete_extract_parser_missing_project_path(self, mock_args):
        # Without parent_project_path the parsed args still equal the mocked
        # Namespace, so the inequality assertion below must fail.
        args = DeleteExtractsParser.delete_extracts_parser()
        with self.assertRaises(AssertionError):
            assert args != argparse.Namespace(datasource="testproject",
                                              embedded_datasources='desc',
                                              url="1234",
                                              encrypt=None,
                                              project="test123",
                                              workbook="workbooktest",
                                              include_all="test",
                                              username="helloworld",
                                              site="",
                                              logging_level="info",
                                              password="testing123",
                                              no_prompt=True, token=None,
                                              token_name=None,
                                              cookie=True,
                                              no_cookie=False,
                                              prompt=False)
| 56.736842
| 77
| 0.345083
| 282
| 5,390
| 6.375887
| 0.244681
| 0.047275
| 0.044494
| 0.084538
| 0.827586
| 0.803115
| 0.769744
| 0.769744
| 0.707453
| 0.707453
| 0
| 0.018885
| 0.597217
| 5,390
| 94
| 78
| 57.340426
| 0.809304
| 0
| 0
| 0.818182
| 0
| 0
| 0.07013
| 0.018924
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.034091
| false
| 0.045455
| 0.068182
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
07a8b5e3c831808e6bc7bc89d714679657d91b6c
| 16,926
|
py
|
Python
|
src/datamigration/azext_datamigration/generated/_help.py
|
calvin197/azure-cli-extensions
|
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
|
[
"MIT"
] | 1
|
2022-02-18T00:16:47.000Z
|
2022-02-18T00:16:47.000Z
|
src/datamigration/azext_datamigration/generated/_help.py
|
calvin197/azure-cli-extensions
|
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
|
[
"MIT"
] | null | null | null |
src/datamigration/azext_datamigration/generated/_help.py
|
calvin197/azure-cli-extensions
|
65fac44b8dd0e1523c3866be5ec92eeb9b3a54f5
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
# Help entry for the root `datamigration` command group. Quote style normalized
# to triple-double quotes for consistency with every other entry in this file.
helps['datamigration'] = """
    type: group
    short-summary: Manage Data Migration
"""
helps['datamigration sql-managed-instance'] = """
type: group
short-summary: Manage database migrations to SQL Managed Instance.
"""
helps['datamigration sql-managed-instance show'] = """
type: command
short-summary: "Retrieve the specified database migration for a given SQL Managed Instance."
examples:
- name: Get Database Migration resource.
text: |-
az datamigration sql-managed-instance show --managed-instance-name "managedInstance1" --resource-group \
"testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance create'] = """
type: command
short-summary: "Create a new database migration to a given SQL Managed Instance."
parameters:
- name: --source-sql-connection
short-summary: "Source SQL Server connection details."
long-summary: |
Usage: --source-sql-connection data-source=XX authentication=XX user-name=XX password=XX \
encrypt-connection=XX trust-server-certificate=XX
data-source: Data source.
authentication: Authentication type.
user-name: User name to connect to source SQL.
password: Password to connect to source SQL.
encrypt-connection: Whether to encrypt connection or not.
trust-server-certificate: Whether to trust server certificate or not.
- name: --offline-configuration
short-summary: "Offline configuration."
long-summary: |
Usage: --offline-configuration offline=XX last-backup-name=XX
offline: Offline migration
last-backup-name: Last backup name for offline migration. This is optional for migrations from file share. \
If it is not provided, then the service will determine the last backup file name based on latest backup files present \
in file share.
- name: --target-location
short-summary: "Target location for copying backups."
long-summary: |
Usage: --target-location storage-account-resource-id=XX account-key=XX
storage-account-resource-id: Resource Id of the storage account copying backups.
account-key: Storage Account Key.
examples:
- name: Create or Update Database Migration resource with Maximum parameters.
text: |-
az datamigration sql-managed-instance create --managed-instance-name "managedInstance1" \
--source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\
\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" storage-account-resource-id="account.database.win\
dows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Micr\
osoft.DataMigration/sqlMigrationServices/testagent" --offline-configuration last-backup-name="last_backup_file_name" \
offline=true --scope "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Microsoft.Sql\
/managedInstances/instance" --source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication"\
data-source="aaa" encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" \
--resource-group "testrg" --target-db-name "db1"
- name: Create or Update Database Migration resource with Minimum parameters.
text: |-
az datamigration sql-managed-instance create --managed-instance-name "managedInstance1" \
--source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\
\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" storage-account-resource-id="account.database.win\
dows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Micr\
osoft.DataMigration/sqlMigrationServices/testagent" --offline-configuration last-backup-name="last_backup_file_name" \
offline=true --scope "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Microsoft.Sql\
/managedInstances/instance" --source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication"\
data-source="aaa" encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" \
--resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance cancel'] = """
type: command
short-summary: "Stop in-progress database migration to SQL Managed Instance."
examples:
- name: Stop ongoing migration for the database.
text: |-
az datamigration sql-managed-instance cancel --managed-instance-name "managedInstance1" \
--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" --resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance cutover'] = """
type: command
short-summary: "Initiate cutover for in-progress online database migration to SQL Managed Instance."
examples:
- name: Cutover online migration operation for the database.
text: |-
az datamigration sql-managed-instance cutover --managed-instance-name "managedInstance1" \
--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" --resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the datamigration sql-managed-instance is \
met.
examples:
- name: Pause executing next line of CLI script until the datamigration sql-managed-instance is successfully \
created.
text: |-
az datamigration sql-managed-instance wait --managed-instance-name "managedInstance1" --resource-group \
"testrg" --target-db-name "db1" --created
"""
helps['datamigration sql-vm'] = """
type: group
short-summary: Manage database migrations to SQL VM.
"""
helps['datamigration sql-vm show'] = """
type: command
short-summary: "Retrieve the specified database migration for a given SQL VM."
examples:
- name: Get Database Migration resource.
text: |-
az datamigration sql-vm show --resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm create'] = """
type: command
short-summary: "Create a new database migration to a given SQL VM."
parameters:
- name: --source-sql-connection
short-summary: "Source SQL Server connection details."
long-summary: |
Usage: --source-sql-connection data-source=XX authentication=XX user-name=XX password=XX \
encrypt-connection=XX trust-server-certificate=XX
data-source: Data source.
authentication: Authentication type.
user-name: User name to connect to source SQL.
password: Password to connect to source SQL.
encrypt-connection: Whether to encrypt connection or not.
trust-server-certificate: Whether to trust server certificate or not.
- name: --offline-configuration
short-summary: "Offline configuration."
long-summary: |
Usage: --offline-configuration offline=XX last-backup-name=XX
offline: Offline migration
last-backup-name: Last backup name for offline migration. This is optional for migrations from file share. \
If it is not provided, then the service will determine the last backup file name based on latest backup files present \
in file share.
- name: --target-location
short-summary: "Target location for copying backups."
long-summary: |
Usage: --target-location storage-account-resource-id=XX account-key=XX
storage-account-resource-id: Resource Id of the storage account copying backups.
account-key: Storage Account Key.
examples:
- name: Create or Update Database Migration resource with Maximum parameters.
text: |-
az datamigration sql-vm create --source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\b\
bb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" \
storage-account-resource-id="account.database.windows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-\
444444444444/resourceGroups/testrg/providers/Microsoft.DataMigration/sqlMigrationServices/testagent" \
--offline-configuration last-backup-name="last_backup_file_name" offline=true --scope "/subscriptions/00000000-1111-222\
2-3333-444444444444/resourceGroups/testrg/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/testvm" \
--source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication" data-source="aaa" \
encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" --resource-group "testrg" \
--sql-vm-name "testvm" --target-db-name "db1"
- name: Create or Update Database Migration resource with Minimum parameters.
text: |-
az datamigration sql-vm create --source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\b\
bb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" \
storage-account-resource-id="account.database.windows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-\
444444444444/resourceGroups/testrg/providers/Microsoft.DataMigration/sqlMigrationServices/testagent" \
--offline-configuration last-backup-name="last_backup_file_name" offline=true --scope "/subscriptions/00000000-1111-222\
2-3333-444444444444/resourceGroups/testrg/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/testvm" \
--source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication" data-source="aaa" \
encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" --resource-group "testrg" \
--sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm cancel'] = """
type: command
short-summary: "Stop in-progress database migration to SQL VM."
examples:
- name: Stop ongoing migration for the database.
text: |-
az datamigration sql-vm cancel --migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" \
--resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm cutover'] = """
type: command
short-summary: "Initiate cutover for in-progress online database migration to SQL VM."
examples:
- name: Cutover online migration operation for the database.
text: |-
az datamigration sql-vm cutover --migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" \
--resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the datamigration sql-vm is met.
examples:
- name: Pause executing next line of CLI script until the datamigration sql-vm is successfully created.
text: |-
az datamigration sql-vm wait --resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1" \
--created
"""
helps['datamigration sql-service'] = """
type: group
short-summary: Manage Database Migration Service.
"""
helps['datamigration sql-service list'] = """
type: command
short-summary: "Retrieve all Database Migration Services in the resource group. And Retrieve all Database \
Migration Services in the subscription."
examples:
- name: Get Migration Services in the Resource Group.
text: |-
az datamigration sql-service list --resource-group "testrg"
- name: Get Services in the Subscriptions.
text: |-
az datamigration sql-service list
"""
helps['datamigration sql-service show'] = """
type: command
short-summary: "Retrieve the Database Migration Service."
examples:
- name: Get Migration Service.
text: |-
az datamigration sql-service show --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service create'] = """
type: command
short-summary: "Create Database Migration Service."
examples:
- name: Create or Update SQL Migration Service with maximum parameters.
text: |-
az datamigration sql-service create --location "northeurope" --resource-group "testrg" --name \
"testagent"
- name: Create or Update SQL Migration Service with minimum parameters.
text: |-
az datamigration sql-service create --location "northeurope" --resource-group "testrg" --name \
"testagent"
"""
helps['datamigration sql-service update'] = """
type: command
short-summary: "Update Database Migration Service."
examples:
- name: Update SQL Migration Service.
text: |-
az datamigration sql-service update --tags mytag="myval" --resource-group "testrg" --name "testagent"
"""
helps['datamigration sql-service delete'] = """
type: command
short-summary: "Delete Database Migration Service."
examples:
- name: Delete SQL Migration Service.
text: |-
az datamigration sql-service delete --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service delete-node'] = """
type: command
short-summary: "Delete the integration runtime node."
examples:
- name: Delete the integration runtime node.
text: |-
az datamigration sql-service delete-node --ir-name "IRName" --node-name "nodeName" --resource-group \
"testrg" --name "service1"
"""
helps['datamigration sql-service list-auth-key'] = """
type: command
short-summary: "Retrieve the List of Authentication Keys for Self Hosted Integration Runtime."
examples:
- name: Retrieve the List of Authentication Keys.
text: |-
az datamigration sql-service list-auth-key --resource-group "testrg" --name "service1"
"""
# Help entry for listing Integration Runtime monitoring data.
# Fixed a user-facing typo in the short-summary: "Runtine" -> "Runtime".
helps['datamigration sql-service list-integration-runtime-metric'] = """
    type: command
    short-summary: "Retrieve the registered Integration Runtime nodes and their monitoring data for a given Database \
Migration Service."
    examples:
      - name: Retrieve the Monitoring Data.
        text: |-
               az datamigration sql-service list-integration-runtime-metric --resource-group "testrg" --name \
"service1"
"""
helps['datamigration sql-service list-migration'] = """
type: command
short-summary: "Retrieve the List of database migrations attached to the service."
examples:
- name: List database migrations attached to the service.
text: |-
az datamigration sql-service list-migration --resource-group "testrg" --name "service1"
"""
# Help entry for regenerating Integration Runtime authentication keys.
# Fixed a user-facing typo in the example name:
# "Regenerate the of Authentication Keys" -> "Regenerate the set of Authentication Keys".
helps['datamigration sql-service regenerate-auth-key'] = """
    type: command
    short-summary: "Regenerate a new set of Authentication Keys for Self Hosted Integration Runtime."
    examples:
      - name: Regenerate the set of Authentication Keys.
        text: |-
               az datamigration sql-service regenerate-auth-key --key-name "authKey1" --resource-group "testrg" --name \
"service1"
"""
helps['datamigration sql-service wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the datamigration sql-service is met.
examples:
- name: Pause executing next line of CLI script until the datamigration sql-service is successfully created.
text: |-
az datamigration sql-service wait --resource-group "testrg" --name "service1" --created
- name: Pause executing next line of CLI script until the datamigration sql-service is successfully updated.
text: |-
az datamigration sql-service wait --resource-group "testrg" --name "service1" --updated
- name: Pause executing next line of CLI script until the datamigration sql-service is successfully deleted.
text: |-
az datamigration sql-service wait --resource-group "testrg" --name "service1" --deleted
"""
| 48.778098
| 120
| 0.686931
| 1,944
| 16,926
| 5.97428
| 0.114712
| 0.081281
| 0.061391
| 0.051145
| 0.929396
| 0.882211
| 0.837093
| 0.806871
| 0.768641
| 0.729551
| 0
| 0.026935
| 0.18185
| 16,926
| 346
| 121
| 48.919075
| 0.811742
| 0.027768
| 0
| 0.676568
| 0
| 0.181518
| 0.966738
| 0.220371
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.039604
| 0.0033
| 0
| 0.0033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
07c937f7f47d379ec282e5d3630e6c4092024b4c
| 20,071
|
py
|
Python
|
src/utils/bounding_box_utils.py
|
aashish2000/Head-Tracking
|
7092263b4f9d99d98f605888eb9cf81ad407adcb
|
[
"MIT"
] | 1
|
2022-02-16T20:23:30.000Z
|
2022-02-16T20:23:30.000Z
|
SSD/bounding_box_utils/bounding_box_utils.py
|
labITS-stt-eesc/Axle_Detection
|
ea8ed4557c3319dfb125f6c62b7c5de41a922133
|
[
"MIT"
] | 2
|
2021-03-23T12:19:07.000Z
|
2021-06-24T14:34:58.000Z
|
SSD/bounding_box_utils/bounding_box_utils.py
|
labITS-stt-eesc/Axle_Detection
|
ea8ed4557c3319dfb125f6c62b7c5de41a922133
|
[
"MIT"
] | null | null | null |
'''
Includes:
* Function to compute the IoU similarity for axis-aligned, rectangular, 2D bounding boxes
* Function for coordinate conversion for axis-aligned, rectangular, 2D bounding boxes
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def convert_coordinates(tensor, start_index, conversion, border_pixels='half'):
    '''
    Convert coordinates for axis-aligned 2D boxes between two coordinate formats.

    Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
    three supported coordinate formats that can be converted from and to each other:
        1) (xmin, xmax, ymin, ymax) - the 'minmax' format
        2) (xmin, ymin, xmax, ymax) - the 'corners' format
        3) (cx, cy, w, h) - the 'centroids' format

    Arguments:
        tensor (array): A Numpy nD array containing the four consecutive coordinates
            to be converted somewhere in the last axis.
        start_index (int): The index of the first coordinate in the last axis of `tensor`.
        conversion (str, optional): The conversion direction. Can be 'minmax2centroids',
            'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
            or 'corners2minmax'.
        border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
            Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
            to the boxes. If 'exclude', the border pixels do not belong to the boxes.
            If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.

    Returns:
        A Numpy nD array, a copy of the input tensor with the converted coordinates
        in place of the original coordinates and the unaltered elements of the original
        tensor elsewhere.

    Raises:
        ValueError: If `conversion` or `border_pixels` is not one of the supported values.
    '''
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1  # Border pixels count as box area: each width/height gains one pixel.
    elif border_pixels == 'exclude':
        d = -1  # Border pixels are outside the box: each width/height loses one pixel.
    else:
        # Previously an unknown value silently left `d` undefined, producing a
        # confusing NameError further down. Fail fast with a clear message instead.
        raise ValueError("Unexpected border_pixels value. Supported values are 'half', 'include', and 'exclude'.")
    ind = start_index
    # `np.float` (a deprecated alias of the builtin) was removed in NumPy 1.24;
    # the builtin `float` is the documented, behaviorally identical replacement.
    tensor1 = np.copy(tensor).astype(float)
    if conversion == 'minmax2centroids':
        tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0  # Set cx
        tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0  # Set cy
        tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] + d  # Set w
        tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] + d  # Set h
    elif conversion == 'centroids2minmax':
        tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0  # Set xmin
        tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0  # Set xmax
        tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0  # Set ymin
        tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0  # Set ymax
    elif conversion == 'corners2centroids':
        tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+2]) / 2.0  # Set cx
        tensor1[..., ind+1] = (tensor[..., ind+1] + tensor[..., ind+3]) / 2.0  # Set cy
        tensor1[..., ind+2] = tensor[..., ind+2] - tensor[..., ind] + d  # Set w
        tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+1] + d  # Set h
    elif conversion == 'centroids2corners':
        tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0  # Set xmin
        tensor1[..., ind+1] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0  # Set ymin
        tensor1[..., ind+2] = tensor[..., ind] + tensor[..., ind+2] / 2.0  # Set xmax
        tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0  # Set ymax
    elif (conversion == 'minmax2corners') or (conversion == 'corners2minmax'):
        # Both directions are the same operation: swap the two middle coordinates.
        tensor1[..., ind+1] = tensor[..., ind+2]
        tensor1[..., ind+2] = tensor[..., ind+1]
    else:
        raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.")
    return tensor1
def convert_coordinates2(tensor, start_index, conversion):
    '''
    A matrix multiplication implementation of `convert_coordinates()`.

    Supports only conversion between the 'centroids' and 'minmax' formats.
    This function is marginally slower on average than `convert_coordinates()`,
    probably because it involves more (unnecessary) arithmetic operations (unnecessary
    because the two matrices are sparse).

    For details please refer to the documentation of `convert_coordinates()`.

    Raises:
        ValueError: If `conversion` is not one of the supported values.
    '''
    ind = start_index
    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent here.
    tensor1 = np.copy(tensor).astype(float)
    if conversion == 'minmax2centroids':
        # Columns of M map (xmin, xmax, ymin, ymax) to (cx, cy, w, h).
        M = np.array([[0.5, 0. , -1., 0.],
                      [0.5, 0. ,  1., 0.],
                      [0. , 0.5,  0., -1.],
                      [0. , 0.5,  0.,  1.]])
        tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
    elif conversion == 'centroids2minmax':
        M = np.array([[ 1. , 1. ,  0. , 0. ],
                      [ 0. , 0. ,  1. , 1. ],
                      [-0.5, 0.5,  0. , 0. ],
                      [ 0. , 0. , -0.5, 0.5]])  # The multiplicative inverse of the matrix above
        tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
    else:
        raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
    return tensor1
def intersection_area(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
    '''
    Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.

    Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.

    In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible
    combinations of the boxes in `boxes1` and `boxes2`.

    In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
    of the `mode` argument for details.

    Arguments:
        boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
            format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
            If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.
        boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
            format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
            If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.
        coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
            `(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
            `(xmin, ymin, xmax, ymax)`.
        mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
            `(m,n)` matrix with the intersection areas for all possible combinations of the `m` boxes in `boxes1` with the
            `n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
            must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
            length `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`.
        border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
            Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
            to the boxes. If 'exclude', the border pixels do not belong to the boxes.
            If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.

    Returns:
        A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with
        the intersection areas of the boxes in `boxes1` and `boxes2`.

    Raises:
        ValueError: If the inputs have unsupported shapes, or if `coords`, `mode`, or
            `border_pixels` has an unsupported value.
    '''
    # Make sure the boxes have the right shapes.
    if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
    if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
    if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
    if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
    if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
    # Bug fix: this previously read `'...{}'.",format(mode)` (comma instead of dot),
    # which passed the raw template and `mode` as two separate exception arguments
    # instead of interpolating `mode` into the message.
    if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
    # Convert the coordinates if necessary.
    if coords == 'centroids':
        boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
        boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
        coords = 'corners'
    elif not (coords in {'minmax', 'corners'}):
        raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
    m = boxes1.shape[0]  # The number of boxes in `boxes1`
    n = boxes2.shape[0]  # The number of boxes in `boxes2`
    # Set the correct coordinate indices for the respective formats.
    if coords == 'corners':
        xmin = 0
        ymin = 1
        xmax = 2
        ymax = 3
    elif coords == 'minmax':
        xmin = 0
        xmax = 1
        ymin = 2
        ymax = 3
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
    elif border_pixels == 'exclude':
        d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
    else:
        # Previously an unknown value left `d` undefined, causing a NameError below.
        raise ValueError("Unexpected border_pixels value. Supported values are 'half', 'include', and 'exclude'.")
    # Compute the intersection areas.
    if mode == 'outer_product':
        # For all possible box combinations, get the greater xmin and ymin values.
        # This is a tensor of shape (m,n,2).
        min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:,[xmin,ymin]], axis=1), reps=(1, n, 1)),
                            np.tile(np.expand_dims(boxes2[:,[xmin,ymin]], axis=0), reps=(m, 1, 1)))
        # For all possible box combinations, get the smaller xmax and ymax values.
        # This is a tensor of shape (m,n,2).
        max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:,[xmax,ymax]], axis=1), reps=(1, n, 1)),
                            np.tile(np.expand_dims(boxes2[:,[xmax,ymax]], axis=0), reps=(m, 1, 1)))
        # Compute the side lengths of the intersection rectangles.
        side_lengths = np.maximum(0, max_xy - min_xy + d)
        return side_lengths[:,:,0] * side_lengths[:,:,1]
    elif mode == 'element-wise':
        min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
        max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
        # Compute the side lengths of the intersection rectangles.
        side_lengths = np.maximum(0, max_xy - min_xy + d)
        return side_lengths[:,0] * side_lengths[:,1]
def intersection_area_(boxes1, boxes2, coords='corners', mode='outer_product', border_pixels='half'):
    '''
    Fast-path variant of `intersection_area()` for internal callers: identical
    semantics, but without any of the safety checks on the inputs.
    '''
    # Translate the coordinate format into column positions within each box row.
    if coords == 'corners':
        xmin, ymin, xmax, ymax = 0, 1, 2, 3
    elif coords == 'minmax':
        xmin, xmax, ymin, ymax = 0, 1, 2, 3
    # `d` adjusts every side length according to how border pixels are counted.
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1  # Border pixels belong to the boxes, so each side gains one pixel.
    elif border_pixels == 'exclude':
        d = -1  # Border pixels lie outside the boxes, so each side loses one pixel.
    if mode == 'outer_product':
        # Pairwise overlap for all m*n combinations via broadcasting: an (m,1,2)
        # array of lower corners against a (1,n,2) array yields an (m,n,2) result,
        # and likewise for the upper corners.
        lower = np.maximum(boxes1[:, [xmin, ymin]][:, np.newaxis, :],
                           boxes2[:, [xmin, ymin]][np.newaxis, :, :])
        upper = np.minimum(boxes1[:, [xmax, ymax]][:, np.newaxis, :],
                           boxes2[:, [xmax, ymax]][np.newaxis, :, :])
        # Clip negative extents to zero (disjoint boxes) and multiply the sides.
        extents = np.maximum(0, upper - lower + d)
        return extents[:, :, 0] * extents[:, :, 1]
    elif mode == 'element-wise':
        # Overlap of boxes1[i] with boxes2[i]; shapes must be broadcast-compatible.
        lower = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
        upper = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
        extents = np.maximum(0, upper - lower + d)
        return extents[:, 0] * extents[:, 1]
def iou(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
    '''
    Computes the intersection-over-union similarity (also known as Jaccard similarity)
    of two sets of axis-aligned 2D rectangular boxes.

    Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
    In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible
    combinations of the boxes in `boxes1` and `boxes2`.
    In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
    of the `mode` argument for details.

    Arguments:
        boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
            format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
            If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.
        boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
            format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
            If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.
        coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
            `(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
            `(xmin, ymin, xmax, ymax)`.
        mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
            `(m,n)` matrix with the IoU overlaps for all possible combinations of the `m` boxes in `boxes1` with the
            `n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
            must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
            length `m` where the i-th position contains the IoU overlap of `boxes1[i]` with `boxes2[i]`.
        border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
            Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
            to the boxes. If 'exclude', the border pixels do not belong to the boxes.
            If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.

    Returns:
        A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values in [0,1],
        the Jaccard similarity of the boxes in `boxes1` and `boxes2`. 0 means there is no overlap between two given
        boxes, 1 means their coordinates are identical.
    '''
    # Make sure the boxes have the right shapes.
    if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
    if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))

    # Promote single boxes to 2D so the index arithmetic below is uniform.
    if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
    if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)

    if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
    if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))

    # Convert the coordinates if necessary.
    if coords == 'centroids':
        boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
        boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
        coords = 'corners'
    elif not (coords in {'minmax', 'corners'}):
        raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")

    # Compute the IoU.

    # Compute the intersection areas.
    # BUG FIX: `border_pixels` must be forwarded here. Previously the intersections
    # were always computed with the default 'half' convention while the union areas
    # below used the caller's choice, producing inconsistent IoU values for
    # 'include'/'exclude'.
    intersection_areas = intersection_area_(boxes1, boxes2, coords=coords, mode=mode, border_pixels=border_pixels)

    m = boxes1.shape[0] # The number of boxes in `boxes1`
    n = boxes2.shape[0] # The number of boxes in `boxes2`

    # Compute the union areas.

    # Set the correct coordinate indices for the respective formats.
    if coords == 'corners':
        xmin, ymin, xmax, ymax = 0, 1, 2, 3
    elif coords == 'minmax':
        xmin, xmax, ymin, ymax = 0, 1, 2, 3

    # `d` compensates the side-length computation `xmax - xmin` / `ymax - ymin`
    # for the chosen border-pixel convention.
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1 # Border pixels belong to the boxes: add one pixel to each side length.
    elif border_pixels == 'exclude':
        d = -1 # Border pixels do not belong to the boxes: subtract one pixel from each side length.

    if mode == 'outer_product':
        # Replicate each set of areas so that `boxes1_areas` and `boxes2_areas`
        # are both (m,n) and align with `intersection_areas`.
        boxes1_areas = np.tile(np.expand_dims((boxes1[:,xmax] - boxes1[:,xmin] + d) * (boxes1[:,ymax] - boxes1[:,ymin] + d), axis=1), reps=(1,n))
        boxes2_areas = np.tile(np.expand_dims((boxes2[:,xmax] - boxes2[:,xmin] + d) * (boxes2[:,ymax] - boxes2[:,ymin] + d), axis=0), reps=(m,1))
    elif mode == 'element-wise':
        boxes1_areas = (boxes1[:,xmax] - boxes1[:,xmin] + d) * (boxes1[:,ymax] - boxes1[:,ymin] + d)
        boxes2_areas = (boxes2[:,xmax] - boxes2[:,xmin] + d) * (boxes2[:,ymax] - boxes2[:,ymin] + d)

    # Inclusion-exclusion: |A ∪ B| = |A| + |B| - |A ∩ B|.
    union_areas = boxes1_areas + boxes2_areas - intersection_areas

    return intersection_areas / union_areas
| 52.4047
| 236
| 0.631857
| 2,833
| 20,071
| 4.434169
| 0.106601
| 0.032479
| 0.013135
| 0.011384
| 0.806082
| 0.784986
| 0.769782
| 0.75609
| 0.755931
| 0.752189
| 0
| 0.030437
| 0.242091
| 20,071
| 383
| 237
| 52.4047
| 0.795359
| 0.503513
| 0
| 0.745562
| 0
| 0.017751
| 0.168826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029586
| false
| 0
| 0.011834
| 0
| 0.08284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed41f3cd70fa1dd4c9e784b84412f66d6746fcd5
| 4,935
|
py
|
Python
|
src/ralph/discovery/tests/plugins/samples/donpedro.py
|
jjagodzinski/ralph
|
000a22bcc934dc2051e7a09ab1e84bd1c25a9e73
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/discovery/tests/plugins/samples/donpedro.py
|
jjagodzinski/ralph
|
000a22bcc934dc2051e7a09ab1e84bd1c25a9e73
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/discovery/tests/plugins/samples/donpedro.py
|
jjagodzinski/ralph
|
000a22bcc934dc2051e7a09ab1e84bd1c25a9e73
|
[
"Apache-2.0"
] | null | null | null |
# Complete sample inventory payload (JSON string): storage, ethernets, memory,
# operating system, processors, device, fibre channel adapters, shares and
# software sections are all populated. Presumably mirrors what the 'donpedro'
# discovery plugin receives — see the test module path; verify against the plugin.
data = """{"data":{"storage": [{"sn":"03da1030-f25b-47","mountpoint":"C:","size":"40957","label":"XENSRC PVDISK SCSI Disk Device"}],
"ethernets": [{"mac":"6A:77:98:51:05:13","speed":"1000000000","label":"Citrix PV Ethernet Adapter #0","ipaddress":"10.100.0.10"}],
"memory": [{"size":"3068","speed":"","index":"DIMM 0","sn":"","caption":"Physical Memory","label":"Physical Memory"}],
"operating_system": {"memory":"3067","storage":"40957","corescount":"2","label":"Microsoft Windows Server 2008 R2 Standard"},
"processors": [{"speed":"2667","cores":"1","index":"CPU0","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"},{"speed":"2667","cores":"1","index":"CPU1","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"}],
"device": {"sn":"6ddaaa4a-dc00-de38-e683-da037fd729ca","caption":"Computer System Product","version":"4.1.2","vendor":"Xen","label":"HVM domU"},
"fcs": [{"physicalid":"0","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"},{"physicalid":"1","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"}],
"shares": [{"volume":"3PARdata VV Multi-Path Disk Device","sn":"25D304C1","label":"3PARdata VV Multi-Path Disk Device"},
{"volume":"3PARdata VV Multi-Path Disk Device","sn":"80C804C1","label":"3PARdata VV Multi-Path Disk Device"}],
"software":[{"vendor":"Vendor 1","label":"Soft 1","version":"1.2.33"}, {"vendor":"Vendor 2","label":"Soft 2","version":"0.8.99"}]
}}
"""
# Sample payload with gaps relative to `data`: empty "ethernets" list, empty
# device serial number ("sn":""), and no "software" section.
incomplete_data = """{"data":{"storage": [{"sn":"03da1030-f25b-47","mountpoint":"C:","size":"40957","label":"XENSRC PVDISK SCSI Disk Device"}],
"ethernets": [],
"memory": [{"size":"3068","speed":"","index":"DIMM 0","sn":"","caption":"Physical Memory","label":"Physical Memory"}],
"operating_system": {"memory":"3067","storage":"40957","corescount":"8","label":"Microsoft Windows Server 2008 R2 Standard"},
"processors": [{"speed":"2667","cores":"4","index":"CPU0","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"},{"speed":"2667","cores":"4","index":"CPU1","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"}],
"device": {"sn":"","caption":"Computer System Product","version":"4.1.2","vendor":"Test 1","label":"HVM domU"},
"fcs": [{"physicalid":"0","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"},{"physicalid":"1","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"}],
"shares": [{"volume":"3PARdata VV Multi-Path Disk Device","sn":"25D304C1","label":"3PARdata VV Multi-Path Disk Device"},
{"volume":"3PARdata VV Multi-Path Disk Device","sn":"80C804C1","label":"3PARdata VV Multi-Path Disk Device"}]
}}
"""
# Sample payload with no ethernet adapters but a non-empty device serial number;
# storage/share serials differ from `data` so the records do not collide.
no_eth_data = """{"data":{"storage": [{"sn":"03da1030-f25b-48","mountpoint":"C:","size":"40957","label":"XENSRC PVDISK SCSI Disk Device"}],
"ethernets": [],
"memory": [{"size":"3068","speed":"","index":"DIMM 0","sn":"","caption":"Physical Memory","label":"Physical Memory"}],
"operating_system": {"memory":"3067","storage":"40957","corescount":"8","label":"Microsoft Windows Server 2008 R2 Standard"},
"processors": [{"speed":"2667","cores":"4","index":"CPU0","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"},{"speed":"2667","cores":"4","index":"CPU1","description":"Intel64 Family 6 Model 44 Stepping 2","numberoflogicalprocessors":"1","caption":"Intel64 Family 6 Model 44 Stepping 2","label":"Intel(R) Xeon(R) CPU E5640 @ 2.67GHz"}],
"device": {"sn":"7ddaaa4a-dc00-de38-e683-da037fd729ac","caption":"Computer System Product","version":"4.1.2","vendor":"Test 2","label":"HVM domU"},
"fcs": [{"physicalid":"0","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"},{"physicalid":"1","model":"QMH2462","sn":"MY572520SK","manufacturer":"QLogic Corporation","label":"QLogic QMH2462 Fibre Channel Adapter"}],
"shares": [{"volume":"3PARdata VV Multi-Path Disk Device","sn":"25D304C2","label":"3PARdata VV Multi-Path Disk Device"},
{"volume":"3PARdata VV Multi-Path Disk Device","sn":"80C804C2","label":"3PARdata VV Multi-Path Disk Device"}]
}}
"""
| 133.378378
| 489
| 0.66464
| 635
| 4,935
| 5.155906
| 0.181102
| 0.045816
| 0.051313
| 0.06964
| 0.922114
| 0.921503
| 0.912645
| 0.90226
| 0.90226
| 0.888821
| 0
| 0.106496
| 0.082877
| 4,935
| 36
| 490
| 137.083333
| 0.61688
| 0
| 0
| 0.588235
| 0
| 0.764706
| 0.987437
| 0.572239
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
71f4054d31efdcfd0220f8bcfdd5726d2c0f20ec
| 178
|
py
|
Python
|
events/contrib/plugins/form_handlers/db_store/urls/__init__.py
|
mansonul/events
|
4f6ca37bc600dcba3f74400d299826882d53b7d2
|
[
"MIT"
] | null | null | null |
events/contrib/plugins/form_handlers/db_store/urls/__init__.py
|
mansonul/events
|
4f6ca37bc600dcba3f74400d299826882d53b7d2
|
[
"MIT"
] | null | null | null |
events/contrib/plugins/form_handlers/db_store/urls/__init__.py
|
mansonul/events
|
4f6ca37bc600dcba3f74400d299826882d53b7d2
|
[
"MIT"
] | null | null | null |
# Only import from .form_handlers module to ensure backwards compatibility.
# Importing from .form_wizard_handlers module should be done explicitly.
from .form_handlers import *
| 44.5
| 75
| 0.825843
| 24
| 178
| 5.958333
| 0.666667
| 0.167832
| 0.223776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129213
| 178
| 3
| 76
| 59.333333
| 0.922581
| 0.808989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9c3301d2e804e5ed406339cee7dd12db606c7cf4
| 3,235
|
py
|
Python
|
covgen/localsearch/trace.py
|
greenmonn/sbst-input-generator
|
3d3234af94a39ca2e3467431b84269e15f31db6f
|
[
"MIT"
] | 7
|
2019-05-16T06:58:44.000Z
|
2020-08-08T09:22:15.000Z
|
covgen/localsearch/trace.py
|
greenmonn/sbst-input-generator
|
3d3234af94a39ca2e3467431b84269e15f31db6f
|
[
"MIT"
] | null | null | null |
covgen/localsearch/trace.py
|
greenmonn/sbst-input-generator
|
3d3234af94a39ca2e3467431b84269e15f31db6f
|
[
"MIT"
] | null | null | null |
import covgen.localsearch.distance_functions as df
class Trace():
    '''
    Records the branches executed during an instrumented run, together with the
    branch distance to the alternative (not-taken) outcome of each predicate.

    Each predicate method evaluates its condition, appends an
    `(id, result, distance_to_alternative)` tuple to `executed_branches`, and
    returns the boolean result so the call can stand in for the original
    condition in the instrumented code. The distance functions come from
    `covgen.localsearch.distance_functions` (imported as `df`).
    '''

    def __init__(self):
        # (branch id, outcome, distance to the alternative outcome), in execution order.
        self.executed_branches = []

    def get_executed_branches(self):
        return self.executed_branches

    def _record(self, id, result, distance_to_alternative):
        # Append one executed-branch entry and pass the outcome through unchanged.
        self.executed_branches.append((id, result, distance_to_alternative))
        return result

    def is_true(self, id, exp):
        # Delegates to `equals`, which records the underlying comparison itself;
        # this method then records the distance to the opposite boolean outcome.
        result = self.equals(id, exp, True)
        if result:
            distance = df.equals_bool(exp, False)
        else:
            distance = df.equals_bool(exp, True)
        return self._record(id, result, distance)

    def is_false(self, id, exp):
        # Mirror image of `is_true`: the alternative outcome is `exp == True`.
        result = self.equals(id, exp, False)
        if result:
            distance = df.equals_bool(exp, True)
        else:
            distance = df.equals_bool(exp, False)
        return self._record(id, result, distance)

    def equals(self, id, lhs, rhs):
        result = lhs == rhs
        if result:
            distance = df.not_equals_num(lhs, rhs)
        else:
            distance = df.equals_num(lhs, rhs)
        return self._record(id, result, distance)

    def not_equals(self, id, lhs, rhs):
        result = lhs != rhs
        if result:
            distance = df.equals_num(lhs, rhs)
        else:
            distance = df.not_equals_num(lhs, rhs)
        return self._record(id, result, distance)

    def less_than(self, id, lhs, rhs):
        result = lhs < rhs
        if result:
            distance = df.greater_than_or_equals(lhs, rhs)
        else:
            distance = df.less_than(lhs, rhs)
        return self._record(id, result, distance)

    def less_than_or_equals(self, id, lhs, rhs):
        result = lhs <= rhs
        if result:
            distance = df.greater_than(lhs, rhs)
        else:
            distance = df.less_than_or_equals(lhs, rhs)
        return self._record(id, result, distance)

    def greater_than(self, id, lhs, rhs):
        result = lhs > rhs
        if result:
            distance = df.less_than_or_equals(lhs, rhs)
        else:
            distance = df.greater_than(lhs, rhs)
        return self._record(id, result, distance)

    def greater_than_or_equals(self, id, lhs, rhs):
        result = lhs >= rhs
        if result:
            distance = df.less_than(lhs, rhs)
        else:
            distance = df.greater_than_or_equals(lhs, rhs)
        return self._record(id, result, distance)
| 25.472441
| 76
| 0.631221
| 393
| 3,235
| 4.895674
| 0.096692
| 0.16632
| 0.349272
| 0.224532
| 0.914241
| 0.914241
| 0.914241
| 0.914241
| 0.832121
| 0.772349
| 0
| 0.003478
| 0.289026
| 3,235
| 126
| 77
| 25.674603
| 0.833043
| 0
| 0
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.012821
| 0.012821
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9c7a7d7f77db4b9185d504465aeeff1dade200f6
| 2,862
|
py
|
Python
|
dashboard/tests/test_signals.py
|
Tomasz-Kluczkowski/Bnice
|
75eb66a94a3bf3225691ed6802e674fbcf108571
|
[
"MIT"
] | null | null | null |
dashboard/tests/test_signals.py
|
Tomasz-Kluczkowski/Bnice
|
75eb66a94a3bf3225691ed6802e674fbcf108571
|
[
"MIT"
] | 60
|
2018-04-20T21:32:21.000Z
|
2021-09-07T23:53:31.000Z
|
dashboard/tests/test_signals.py
|
Tomasz-Kluczkowski/Bnice
|
75eb66a94a3bf3225691ed6802e674fbcf108571
|
[
"MIT"
] | null | null | null |
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from guardian.models import UserObjectPermission
class TestSmileySignals:
    # Tests for object-level permissions on Smiley instances. The fixtures
    # (`parent_user`, `child`, `child_user`, `smiley_custom_description`) are
    # presumably defined in a conftest.py elsewhere — not visible here; verify.

    def test_add_smiley_object_permissions_parent_user(self, parent_user, child, smiley_custom_description):
        # The parent user holds view, edit and delete permissions on the instance.
        assert parent_user.has_perm('dashboard.view_smiley_instance', smiley_custom_description)
        assert parent_user.has_perm('dashboard.edit_smiley_instance', smiley_custom_description)
        assert parent_user.has_perm('dashboard.delete_smiley_instance', smiley_custom_description)

    def test_add_smiley_object_permissions_child_user(self, parent_user, child, child_user, smiley_custom_description):
        # The child user can only view — not edit or delete — the instance.
        assert child_user.has_perm('dashboard.view_smiley_instance', smiley_custom_description)
        assert not child_user.has_perm('dashboard.edit_smiley_instance', smiley_custom_description)
        assert not child_user.has_perm('dashboard.delete_smiley_instance', smiley_custom_description)

    def test_smiley_object_permissions_removed_after_smiley_deleted(self, parent_user, child,
                                                                    smiley_custom_description):
        # Sanity-check that a permission exists before deletion...
        assert parent_user.has_perm('dashboard.view_smiley_instance', smiley_custom_description)
        filters = Q(content_type=ContentType.objects.get_for_model(smiley_custom_description),
                    object_pk=smiley_custom_description.pk)
        smiley_custom_description.delete()
        # ...then verify no orphaned UserObjectPermission rows remain afterwards.
        assert not UserObjectPermission.objects.filter(filters).exists()
class TestOopsySignals:
    # Tests for object-level permissions on Oopsy instances; parallels the
    # Smiley test cases above with the corresponding 'oopsy' permission codenames.

    def test_add_oopsy_object_permissions_parent_user(self, parent_user, child, oopsy_custom_description):
        # The parent user holds view, edit and delete permissions on the instance.
        assert parent_user.has_perm('dashboard.view_oopsy_instance', oopsy_custom_description)
        assert parent_user.has_perm('dashboard.edit_oopsy_instance', oopsy_custom_description)
        assert parent_user.has_perm('dashboard.delete_oopsy_instance', oopsy_custom_description)

    def test_add_oopsy_object_permissions_child_user(self, parent_user, child, child_user, oopsy_custom_description):
        # The child user can only view — not edit or delete — the instance.
        assert child_user.has_perm('dashboard.view_oopsy_instance', oopsy_custom_description)
        assert not child_user.has_perm('dashboard.edit_oopsy_instance', oopsy_custom_description)
        assert not child_user.has_perm('dashboard.delete_oopsy_instance', oopsy_custom_description)

    def test_oopsy_object_permissions_removed_after_oopsy_deleted(self, parent_user, child, oopsy_custom_description):
        # Sanity-check that a permission exists before deletion...
        assert parent_user.has_perm('dashboard.view_oopsy_instance', oopsy_custom_description)
        filters = Q(content_type=ContentType.objects.get_for_model(oopsy_custom_description),
                    object_pk=oopsy_custom_description.pk)
        oopsy_custom_description.delete()
        # ...then verify no orphaned UserObjectPermission rows remain afterwards.
        assert not UserObjectPermission.objects.filter(filters).exists()
| 63.6
| 119
| 0.791055
| 348
| 2,862
| 6.04023
| 0.140805
| 0.210276
| 0.153187
| 0.133206
| 0.821123
| 0.814462
| 0.791627
| 0.791627
| 0.765937
| 0.739296
| 0
| 0
| 0.143955
| 2,862
| 44
| 120
| 65.045455
| 0.857959
| 0
| 0
| 0.176471
| 0
| 0
| 0.1471
| 0.1471
| 0
| 0
| 0
| 0
| 0.470588
| 1
| 0.176471
| false
| 0
| 0.088235
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
92cc1ddd048789eb160e6826cc9b908fb674dd2d
| 26,807
|
py
|
Python
|
sdk/python/pulumi_aws/elasticache/cluster.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/cluster.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/cluster.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Cluster(pulumi.CustomResource):
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
`false`. See [Amazon ElastiCache Documentation for more information.][1]
(Available since v0.6.0)
"""
arn: pulumi.Output[str]
availability_zone: pulumi.Output[str]
"""
The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
"""
az_mode: pulumi.Output[str]
"""
Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`
"""
cache_nodes: pulumi.Output[list]
"""
List of node objects including `id`, `address`, `port` and `availability_zone`.
Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}`
* `address` (`str`)
* `availability_zone` (`str`) - The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
* `id` (`str`)
* `port` (`float`) - The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
"""
cluster_address: pulumi.Output[str]
"""
(Memcached only) The DNS name of the cache cluster without the port appended.
"""
cluster_id: pulumi.Output[str]
"""
Group identifier. ElastiCache converts
this name to lowercase
"""
configuration_endpoint: pulumi.Output[str]
"""
(Memcached only) The configuration endpoint to allow host discovery.
"""
engine: pulumi.Output[str]
"""
Name of the cache engine to be used for this cache cluster.
Valid values for this parameter are `memcached` or `redis`
"""
engine_version: pulumi.Output[str]
"""
Version number of the cache engine to be used.
See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html)
in the AWS Documentation center for supported versions
"""
maintenance_window: pulumi.Output[str]
"""
Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
"""
node_type: pulumi.Output[str]
"""
The compute and memory capacity of the nodes. See
[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for
supported node types
"""
notification_topic_arn: pulumi.Output[str]
"""
An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
"""
num_cache_nodes: pulumi.Output[float]
"""
The initial number of cache nodes that the
cache cluster will have. For Redis, this value must be 1. For Memcache, this
value must be between 1 and 20. If this number is reduced on subsequent runs,
the highest numbered nodes will be removed.
"""
parameter_group_name: pulumi.Output[str]
"""
Name of the parameter group to associate
with this cache cluster
"""
port: pulumi.Output[float]
"""
The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
"""
preferred_availability_zones: pulumi.Output[list]
"""
A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference.
"""
replication_group_id: pulumi.Output[str]
"""
The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.
"""
security_group_ids: pulumi.Output[list]
"""
One or more VPC security groups associated
with the cache cluster
"""
security_group_names: pulumi.Output[list]
"""
List of security group
names to associate with this cache cluster
"""
snapshot_arns: pulumi.Output[list]
"""
A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
"""
snapshot_name: pulumi.Output[str]
"""
The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
"""
snapshot_retention_limit: pulumi.Output[float]
"""
The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
"""
snapshot_window: pulumi.Output[str]
"""
The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
"""
subnet_group_name: pulumi.Output[str]
"""
Name of the subnet group to be used
for the cache cluster.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource
"""
def __init__(__self__, resource_name, opts=None, apply_immediately=None, availability_zone=None, az_mode=None, cluster_id=None, engine=None, engine_version=None, maintenance_window=None, node_type=None, notification_topic_arn=None, num_cache_nodes=None, parameter_group_name=None, port=None, preferred_availability_zones=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_arns=None, snapshot_name=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
    """
    Create an ElastiCache Cluster resource, managing a Memcached cluster or a
    single Redis instance.

    For Redis (Cluster Mode Enabled) replication groups, use the
    `elasticache.ReplicationGroup` resource instead. Note that by default an
    attribute change (such as `node_type`) is applied during the next
    maintenance window, so the provider may report a planning difference until
    then; set `apply_immediately` to apply changes at once, which can cause a
    brief downtime while the server reboots.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] apply_immediately: Apply modifications immediately rather than in the next maintenance window. Default is `false`.
    :param pulumi.Input[str] availability_zone: The Availability Zone for the cache cluster; for multi-AZ use `preferred_availability_zones` instead.
    :param pulumi.Input[str] az_mode: `single-az` (default) or `cross-az` for Memcached node placement; `cross-az` requires `num_cache_nodes` greater than `1`.
    :param pulumi.Input[str] cluster_id: Group identifier; ElastiCache converts this name to lowercase.
    :param pulumi.Input[str] engine: Cache engine for this cluster, either `memcached` or `redis`.
    :param pulumi.Input[str] engine_version: Version number of the cache engine to be used.
    :param pulumi.Input[str] maintenance_window: Weekly maintenance window in `ddd:hh24:mi-ddd:hh24:mi` format (24H clock, UTC), minimum 60 minutes. Example: `sun:05:00-sun:09:00`.
    :param pulumi.Input[str] node_type: The compute and memory capacity of the nodes.
    :param pulumi.Input[str] notification_topic_arn: ARN of an SNS topic to send ElastiCache notifications to.
    :param pulumi.Input[float] num_cache_nodes: Initial number of cache nodes (must be 1 for Redis; 1-20 for Memcached). Reducing it removes the highest numbered nodes.
    :param pulumi.Input[str] parameter_group_name: Name of the parameter group to associate with this cache cluster.
    :param pulumi.Input[float] port: Port each node accepts connections on (Memcached default 11211, Redis default 6379). Cannot be provided with `replication_group_id`.
    :param pulumi.Input[list] preferred_availability_zones: One Availability Zone per node; the list length must equal `num_cache_nodes`.
    :param pulumi.Input[str] replication_group_id: ID of the replication group this cluster should join as a read replica.
    :param pulumi.Input[list] security_group_ids: One or more VPC security groups associated with the cache cluster.
    :param pulumi.Input[list] security_group_names: List of security group names to associate with this cache cluster.
    :param pulumi.Input[list] snapshot_arns: Single-element list containing the S3 ARN of a Redis RDB snapshot to restore from. Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`.
    :param pulumi.Input[str] snapshot_name: Name of a snapshot to restore into the new node group. Changing it forces a new resource.
    :param pulumi.Input[float] snapshot_retention_limit: Days to retain automatic snapshots; zero (0) turns backups off. Not supported on cache.t1.micro or cache.t2.* nodes.
    :param pulumi.Input[str] snapshot_window: Daily time range (UTC) during which snapshots begin. Example: 05:00-09:00.
    :param pulumi.Input[str] subnet_group_name: Name of the subnet group to be used for the cache cluster.
    :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
    """
    # Honour the deprecated __name__/__opts__ aliases for old callers.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = utilities.get_version()
    if opts.id is None:
        # Creating a new resource: collect the caller's inputs into the
        # property map. A caller-supplied __props__ is only meaningful when
        # looking up an existing resource by id.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = {
            'apply_immediately': apply_immediately,
            'availability_zone': availability_zone,
            'az_mode': az_mode,
            'cluster_id': cluster_id,
            'engine': engine,
            'engine_version': engine_version,
            'maintenance_window': maintenance_window,
            'node_type': node_type,
            'notification_topic_arn': notification_topic_arn,
            'num_cache_nodes': num_cache_nodes,
            'parameter_group_name': parameter_group_name,
            'port': port,
            'preferred_availability_zones': preferred_availability_zones,
            'replication_group_id': replication_group_id,
            'security_group_ids': security_group_ids,
            'security_group_names': security_group_names,
            'snapshot_arns': snapshot_arns,
            'snapshot_name': snapshot_name,
            'snapshot_retention_limit': snapshot_retention_limit,
            'snapshot_window': snapshot_window,
            'subnet_group_name': subnet_group_name,
            'tags': tags,
        }
        # Output-only properties are unknown until the provider responds.
        for output_prop in ('arn', 'cache_nodes', 'cluster_address', 'configuration_endpoint'):
            __props__[output_prop] = None
    super(Cluster, __self__).__init__(
        'aws:elasticache/cluster:Cluster',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name, id, opts=None, apply_immediately=None, arn=None, availability_zone=None, az_mode=None, cache_nodes=None, cluster_address=None, cluster_id=None, configuration_endpoint=None, engine=None, engine_version=None, maintenance_window=None, node_type=None, notification_topic_arn=None, num_cache_nodes=None, parameter_group_name=None, port=None, preferred_availability_zones=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_arns=None, snapshot_name=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None):
    """
    Look up an existing Cluster resource by name and provider id, optionally
    seeding its state with known property values.

    :param str resource_name: The unique name of the resulting resource.
    :param str id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] apply_immediately: Apply modifications immediately rather than in the next maintenance window. Default is `false`.
    :param pulumi.Input[str] availability_zone: The Availability Zone for the cache cluster; for multi-AZ use `preferred_availability_zones` instead.
    :param pulumi.Input[str] az_mode: `single-az` (default) or `cross-az` for Memcached node placement; `cross-az` requires `num_cache_nodes` greater than `1`.
    :param pulumi.Input[list] cache_nodes: List of node objects including `id`, `address`, `port` and `availability_zone`. Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}`.
    :param pulumi.Input[str] cluster_address: (Memcached only) The DNS name of the cache cluster without the port appended.
    :param pulumi.Input[str] cluster_id: Group identifier; ElastiCache converts this name to lowercase.
    :param pulumi.Input[str] configuration_endpoint: (Memcached only) The configuration endpoint to allow host discovery.
    :param pulumi.Input[str] engine: Cache engine for this cluster, either `memcached` or `redis`.
    :param pulumi.Input[str] engine_version: Version number of the cache engine to be used.
    :param pulumi.Input[str] maintenance_window: Weekly maintenance window in `ddd:hh24:mi-ddd:hh24:mi` format (24H clock, UTC), minimum 60 minutes. Example: `sun:05:00-sun:09:00`.
    :param pulumi.Input[str] node_type: The compute and memory capacity of the nodes.
    :param pulumi.Input[str] notification_topic_arn: ARN of an SNS topic to send ElastiCache notifications to.
    :param pulumi.Input[float] num_cache_nodes: Initial number of cache nodes (must be 1 for Redis; 1-20 for Memcached). Reducing it removes the highest numbered nodes.
    :param pulumi.Input[str] parameter_group_name: Name of the parameter group to associate with this cache cluster.
    :param pulumi.Input[float] port: Port each node accepts connections on (Memcached default 11211, Redis default 6379). Cannot be provided with `replication_group_id`.
    :param pulumi.Input[list] preferred_availability_zones: One Availability Zone per node; the list length must equal `num_cache_nodes`.
    :param pulumi.Input[str] replication_group_id: ID of the replication group this cluster should join as a read replica.
    :param pulumi.Input[list] security_group_ids: One or more VPC security groups associated with the cache cluster.
    :param pulumi.Input[list] security_group_names: List of security group names to associate with this cache cluster.
    :param pulumi.Input[list] snapshot_arns: Single-element list containing the S3 ARN of a Redis RDB snapshot to restore from. Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`.
    :param pulumi.Input[str] snapshot_name: Name of a snapshot to restore into the new node group. Changing it forces a new resource.
    :param pulumi.Input[float] snapshot_retention_limit: Days to retain automatic snapshots; zero (0) turns backups off. Not supported on cache.t1.micro or cache.t2.* nodes.
    :param pulumi.Input[str] snapshot_window: Daily time range (UTC) during which snapshots begin. Example: 05:00-09:00.
    :param pulumi.Input[str] subnet_group_name: Name of the subnet group to be used for the cache cluster.
    :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.

    The **cache_nodes** object supports the following:
      * `address` (`pulumi.Input[str]`)
      * `availability_zone` (`pulumi.Input[str]`)
      * `id` (`pulumi.Input[str]`)
      * `port` (`pulumi.Input[float]`)
    """
    # Attaching the provider id makes the engine read existing state
    # instead of creating a new resource.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = {
        "apply_immediately": apply_immediately,
        "arn": arn,
        "availability_zone": availability_zone,
        "az_mode": az_mode,
        "cache_nodes": cache_nodes,
        "cluster_address": cluster_address,
        "cluster_id": cluster_id,
        "configuration_endpoint": configuration_endpoint,
        "engine": engine,
        "engine_version": engine_version,
        "maintenance_window": maintenance_window,
        "node_type": node_type,
        "notification_topic_arn": notification_topic_arn,
        "num_cache_nodes": num_cache_nodes,
        "parameter_group_name": parameter_group_name,
        "port": port,
        "preferred_availability_zones": preferred_availability_zones,
        "replication_group_id": replication_group_id,
        "security_group_ids": security_group_ids,
        "security_group_names": security_group_names,
        "snapshot_arns": snapshot_arns,
        "snapshot_name": snapshot_name,
        "snapshot_retention_limit": snapshot_retention_limit,
        "snapshot_window": snapshot_window,
        "subnet_group_name": subnet_group_name,
        "tags": tags,
    }
    return Cluster(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
    """Translate a provider-side (camelCase) property name to its Python (snake_case) form, or return it unchanged when unmapped."""
    translated = tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return translated if translated else prop
def translate_input_property(self, prop):
    """Translate a Python-side (snake_case) property name to its provider (camelCase) form, or return it unchanged when unmapped."""
    translated = tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return translated if translated else prop
| 72.451351
| 792
| 0.719327
| 3,688
| 26,807
| 5.050163
| 0.10602
| 0.030121
| 0.040376
| 0.028564
| 0.84451
| 0.831517
| 0.824268
| 0.820617
| 0.806336
| 0.798336
| 0
| 0.010008
| 0.213489
| 26,807
| 369
| 793
| 72.647696
| 0.873364
| 0.496736
| 0
| 0.017241
| 1
| 0
| 0.152171
| 0.03238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0.008621
| 0.051724
| 0.017241
| 0.344828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
130c51b2c8ae68247d06e21a13fb0e08a71f6976
| 473,295
|
py
|
Python
|
dev/services/wms/ows/wms_cfg.py
|
ASVincent/dea-config
|
ca1edee807de706057d8d3f54f0166ee8e5325d8
|
[
"Apache-2.0"
] | null | null | null |
dev/services/wms/ows/wms_cfg.py
|
ASVincent/dea-config
|
ca1edee807de706057d8d3f54f0166ee8e5325d8
|
[
"Apache-2.0"
] | null | null | null |
dev/services/wms/ows/wms_cfg.py
|
ASVincent/dea-config
|
ca1edee807de706057d8d3f54f0166ee8e5325d8
|
[
"Apache-2.0"
] | null | null | null |
# Static configuration for the WMS metadata:
# extra HTTP headers attached to every service response.
response_cfg = {
    # CORS: permit cross-origin requests from any domain.
    "Access-Control-Allow-Origin": "*",
}
# Top-level service configuration shared by the WMS/WMTS/WCS endpoints.
service_cfg = {
    ## Which web service(s) should be supported by this instance.
    "wcs": True,
    "wms": True,
    "wmts": True,
    ## Required config for WMS and/or WCS.
    # Service title - appears e.g. in the Terria catalog.
    "title": "Digital Earth Australia - OGC Web Services",
    # Service URLs. Each should be a fully qualified URL.
    "url": [
        "https://ows.services.dea.ga.gov.au",
        "https://ows.services.devkube.dea.ga.gov.au",
        "https://nrt.services.dea.ga.gov.au",
        "https://geomedian.services.dea.ga.gov.au",
        "https://geomedianau.dea.ga.gov.au",
        "https://geomedian.dea.ga.gov.au",
        "https://nrt.dea.ga.gov.au",
        "https://nrt-au.dea.ga.gov.au"],
    # Human-readable project URL, advertised in service metadata.
    "human_url": "dea.ga.gov.au/",
    # Supported co-ordinate reference systems.
    "published_CRSs": {
        "EPSG:3857": {  # Web Mercator
            "geographic": False,
            "horizontal_coord": "x",
            "vertical_coord": "y",
        },
        "EPSG:4326": {  # WGS-84
            "geographic": True,
            "vertical_coord_first": True
        },
        "EPSG:3577": {  # GDA-94, internal representation
            "geographic": False,
            "horizontal_coord": "x",
            "vertical_coord": "y",
        },
    },
    ## Required config for WCS.
    # Must be a geographic CRS in the published_CRSs list above.
    # EPSG:4326 is recommended, but any geographic CRS should work.
    "default_geographic_CRS": "EPSG:4326",
    # Supported WCS output formats.
    "wcs_formats": {
        # Key is the format name, as used in DescribeCoverage XML.
        "GeoTIFF": {
            # Renderer is the fully-qualified name of a Python function that takes:
            #   * a WCS Request object
            #   * some ODC data to be rendered.
            "renderer": "datacube_wms.wcs_utils.get_tiff",
            # The MIME type of the image, as used in the HTTP response.
            "mime": "image/geotiff",
            # The file extension to add to the filename.
            "extension": "tif",
            # Whether or not the file format supports multiple time slices.
            "multi-time": False
        },
        "netCDF": {
            "renderer": "datacube_wms.wcs_utils.get_netcdf",
            "mime": "application/x-netcdf",
            "extension": "nc",
            "multi-time": True,
        }
    },
    # The native WCS format; must be declared in wcs_formats above.
    "native_wcs_format": "GeoTIFF",
    ## Optional config for instances supporting WMS.
    # Maximum tile dimensions served, in pixels.
    "max_width": 512,
    "max_height": 512,
    # Optional config for all services (WMS and/or WCS) - may be set to blank/empty, no defaults.
    "abstract": """Digital Earth Australia OGC Web Services""",
    # Keywords advertised in the GetCapabilities response.
    "keywords": [
        "geomedian",
        "WOfS",
        "mangrove",
        "bare-earth",
        "NIDEM",
        "HLTC",
        "landsat",
        "australia",
        "time-series",
        "fractional-cover"
    ],
    # Contact details published in service metadata.
    "contact_info": {
        "person": "Digital Earth Australia",
        "organisation": "Geoscience Australia",
        "position": "",
        "address": {
            "type": "postal",
            "address": "GPO Box 378",
            "city": "Canberra",
            "state": "ACT",
            "postcode": "2609",
            "country": "Australia",
        },
        "telephone": "+61 2 6249 9111",
        "fax": "",
        "email": "earth.observation@ga.gov.au",
    },
    "fees": "",
    # NOTE: the three adjacent string literals below are concatenated implicitly.
    "access_constraints": "© Commonwealth of Australia (Geoscience Australia) 2018. "
    "This product is released under the Creative Commons Attribution 4.0 International Licence. "
    "http://creativecommons.org/licenses/by/4.0/legalcode",
    # Pre-sign/authenticate S3 access before rendering.
    "preauthenticate_s3": True,
    # Where GeoTIFF georeferencing information is sourced from.
    "geotiff_georeference_source": "INTERNAL"
}
layer_cfg = [
# Layer Config is a list of platform configs
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "Geomedian_AU_NBART",
"title": "Surface Reflectance",
"abstract": "",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Landsat 8",
# Included as a keyword for the layer
"type": "Annual Geomedian",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"abstract": """
Data is only visible at higher resolutions; when zoomed-out the available area will be displayed
as a shaded region. The surface reflectance geometric median (geomedian) is a pixel composite
mosaic of a time series of earth observations. The value of a pixel in a an annual geomedian
image is the statistical median of all observations for that pixel from a calendar year.
Annual mosaics are available for the following years:
Landsat 8: 2013 to 2017;
For more information, see http://pid.geoscience.gov.au/dataset/ga/120374
For service status information, see https://status.dea.ga.gov.au""",
"name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "blue",
"title": "Blue - 480",
"abstract": "Blue band, centered on 480nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, centered on 560nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 660",
"abstract": "Red band, centered on 660nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 870",
"abstract": "Near infra-red band, centered on 870nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, centered on 1610nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2200",
"abstract": "Short wave infra-red band 2, centered on 2200nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Landsat 7",
# Included as a keyword for the layer
"type": "Annual Geomedian",
# Included as a keyword for the layer
"variant": "25m",
"abstract": """
Data is only visible at higher resolutions; when zoomed-out the available area will be displayed
as a shaded region. The surface reflectance geometric median (geomedian) is a pixel composite
mosaic of a time series of earth observations. The value of a pixel in a an annual geomedian
image is the statistical median of all observations for that pixel from a calendar year.
Annual mosaics are available for the following years:
Landsat 7: 2000 to 2017;
For more information, see http://pid.geoscience.gov.au/dataset/ga/120374
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, centered on 490nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, centered on 560nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 660",
"abstract": "Red band, centered on 660nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, centered on 840nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1650",
"abstract": "Short wave infra-red band 1, centered on 1650nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2220",
"abstract": "Short wave infra-red band 2, centered on 2220nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Landsat 5",
# Included as a keyword for the layer
"type": "Annual Geomedian",
# Included as a keyword for the layer
"variant": "25m",
"abstract": """
Data is only visible at higher resolutions; when zoomed-out the available area will be displayed
as a shaded region. The surface reflectance geometric median (geomedian) is a pixel composite
mosaic of a time series of earth observations. The value of a pixel in an annual geomedian
image is the statistical median of all observations for that pixel from a calendar year.
Annual mosaics are available for the following years:
Landsat 5: 1988 to 1999, 2004 to 2007, 2009 to 2011;
For more information, see http://pid.geoscience.gov.au/dataset/ga/120374
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of a dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, centered on 490nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, centered on 560nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 660",
"abstract": "Red band, centered on 660nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, centered on 840nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1650",
"abstract": "Short wave infra-red band 1, centered on 1650nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2220",
"abstract": "Short wave infra-red band 2, centered on 2220nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "landsat8_barest_earth",
"title": "Barest Earth",
"abstract": """
A 'weighted geometric median' approach has been used to estimate the median surface reflectance of the barest state (i.e., least vegetation) observed through Landsat-8 OLI observations from 2013 to September 2018 to generate a six-band Landsat-8 Barest Earth pixel composite mosaic over the Australian continent.
The bands include BLUE (0.452 - 0.512), GREEN (0.533 - 0.590), RED (0.636 - 0.673), NIR (0.851 - 0.879), SWIR1 (1.566 - 1.651) and SWIR2 (2.107 - 2.294) wavelength regions. The weighted median approach is robust to outliers (such as cloud, shadows, saturation, corrupted pixels) and also maintains the relationship between all the spectral wavelengths in the spectra observed through time. The product reduces the influence of vegetation and allows for more direct mapping of soil and rock mineralogy.
Reference: Dale Roberts, John Wilford, and Omar Ghattas (2018). Revealing the Australian Continent at its Barest, submitted.
Mosaics are available for the following years:
Landsat 8: 2013 to 2017;
""",
# Link removed until eCat record is "published_external", not "published_internal"
# For more information, see the dataset record: http://pid.geoscience.gov.au/dataset/ga/122573
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Landsat 8",
# Included as a keyword for the layer
"type": "Barest Earth",
# Included as a keyword for the layer
"variant": "25m",
"abstract": """
A 'weighted geometric median' approach has been used to estimate the median surface reflectance of the barest state (i.e., least vegetation) observed through Landsat-8 OLI observations from 2013 to September 2018 to generate a six-band Landsat-8 Barest Earth pixel composite mosaic over the Australian continent.
The bands include BLUE (0.452 - 0.512), GREEN (0.533 - 0.590), RED (0.636 - 0.673), NIR (0.851 - 0.879), SWIR1 (1.566 - 1.651) and SWIR2 (2.107 - 2.294) wavelength regions. The weighted median approach is robust to outliers (such as cloud, shadows, saturation, corrupted pixels) and also maintains the relationship between all the spectral wavelengths in the spectra observed through time. The product reduces the influence of vegetation and allows for more direct mapping of soil and rock mineralogy.
Reference: Dale Roberts, John Wilford, and Omar Ghattas (2018). Revealing the Australian Continent at its Barest, submitted.
Mosaics are available for the following years:
Landsat 8: 2013 to 2017;
For service status information, see https://status.dea.ga.gov.au""",
# Link removed until eCat record is "published_external", not "published_internal"
# For more information, see the dataset record: http://pid.geoscience.gov.au/dataset/ga/122573
# The WMS name for the layer
"name": "ls8_barest_earth_mosaic",
# The Datacube name for the associated data product
"product_name": "ls8_barest_earth_albers",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
#"max_datasets_wms": 1000,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of a dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi"]
},
"wcs_default_bands": ["red", "green", "blue"],
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "blue",
"title": "Blue - 480",
"abstract": "Blue band, centered on 480nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, centered on 560nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 660",
"abstract": "Red band, centered on 660nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 870",
"abstract": "Near infra-red band, centered on 870nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, centered on 1610nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2200",
"abstract": "Short wave infra-red band 2, centered on 2200nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}
]
},
{
"name": "mangrove_cover",
"title": "Mangrove Canopy Cover",
"abstract": "",
"products": [
{
"label": "Mangrove Canopy Cover",
"abstract": """
Mangrove canopy cover version 1, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
The mangrove canopy cover product provides valuable information about the extent and canopy density of mangroves for each year between 1987 and 2016 for the entire Australian coastline.
The canopy cover classes are:
20-50% (pale green), 50-80% (mid green), 80-100% (dark green).
The product consists of a sequence (one per year) of 25 metre resolution maps that are generated by analysing the Landsat fractional cover (https://doi.org/10.6084/m9.figshare.94250.v1) developed by the Joint Remote Sensing Research Program and the Global Mangrove Watch layers (https://doi.org/10.1071/MF13177) developed by the Japanese Aerospace Exploration Agency.
The mangrove canopy cover version 1 product has the following caveats:
it underestimates the overall extent of mangroves.
it doesn’t detect small mangrove communities i.e. smaller estuaries in NSW and Victoria
that there is localised confusion between mangroves and wooded freshwater wetlands i.e. Melaleuca swamps, and
in some locations dense dwarf/shrub mangroves that are less than 2 metres tall may be mis-labelled as woodland/open forest/closed forest.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "mangrove_cover",
"product_name": "mangrove_cover",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data["extent"] == 1,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["extent"],
"apply_solar_corrections": False,
"legend": {
"styles": ["mangrove"]
},
"wcs_default_bands": ["canopy_cover_class", "extent"],
"styles": [
{
"name": "mangrove",
"title": "Mangrove Cover",
"abstract": "",
"value_map": {
"canopy_cover_class": [
{
"title": "Woodland",
"abstract": "(20% - 50% cover)",
"flags": {
"woodland": True
},
"color": "#9FFF4C"
},
{
"title": "Open Forest",
"abstract": "(50% - 80% cover)",
"flags": {
"open_forest": True
},
"color": "#5ECC00"
},
{
"title": "Closed Forest",
"abstract": "(>80% cover)",
"flags": {
"closed_forest": True
},
"color": "#3B7F00"
},
]
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "mangrove",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "WOfS",
"title": "Water Observations from Space",
"abstract": "WOfS",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "WOfS Filtered Statistics",
# Included as a keyword for the layer
"type": "Filtered Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_filtered_summary",
# The Datacube name for the associated data product
"product_name": "wofs_filtered_summary",
"abstract": """
Water Observations from Space (WOfS) Filtered Statistics helps provide the long term understanding of the recurrence of water in the landscape, with much of the noise due to misclassification filtered out. WOfS Filtered Statistics consists of a Confidence layer that compares the WOfS Statistics water summary to other national water datasets, and the Filtered Water Summary which uses the Confidence to mask areas of the WOfS Statistics water summary where Confidence is low.
This layer is Filtered Water Summary: A simplified version of the Water Summary, showing the frequency of water observations where the Confidence is above a cutoff level. No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
The Filtered Water Summary layer is a noise-reduced view of surface water across Australia. Even though confidence filtering is applied to the Filtered Water Summary, some cloud and shadow, and sensor noise does persist.
For more information please see: https://data.dea.ga.gov.au/?prefix=WOfS/filtered_summary/v2.1.0/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of a dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": [
"WOfS_filtered_frequency",
"WOfS_filtered_frequency_blues_transparent"]
},
"wcs_default_bands": ["wofs_filtered_summary"],
"styles": [
{
"name": "WOfS_filtered_frequency",
"title": "Filtered Water Summary",
"abstract": "WOfS filtered summary showing the frequency of Wetness",
"needed_bands": ["wofs_filtered_summary"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.002,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.005,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.01,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.02,
"color": "#e38400"
},
{
"value": 0.05,
"color": "#e3df00"
},
{
"value": 0.1,
"color": "#a6e300"
},
{
"value": 0.2,
"color": "#62e300"
},
{
"value": 0.3,
"color": "#00e32d"
},
{
"value": 0.4,
"color": "#00e384"
},
{
"value": 0.5,
"color": "#00e3c8"
},
{
"value": 0.6,
"color": "#00c5e3"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "WOfS_filtered_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS filtered summary showing the frequency of Wetness",
"needed_bands": ["wofs_filtered_summary"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "WOfS_filtered_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS Statistics",
# Included as a keyword for the layer
"type": "Wet Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_summary_wet",
# The Datacube name for the associated data product
"product_name": "wofs_summary",
"abstract": """
Water Observations from Space (WOfS) Statistics is a set of statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products which help the understanding of surface water across Australia. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This layer contains Wet Count: how many times water was detected in observations that were clear. No clear observations of water causes an area to appear transparent, 1-50 total clear observations of water correlate with red and yellow colours, 100 clear observations of water correlate with green, 200 clear observations of water correlates with light blue, 300 clear observations of water correlates to deep blue and 400 and over observations of clear water correlate to purple.
As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own. The confidence layer and filtered summary are contained in the Water Observations from Space Statistics Filtered Summary product, which provide a noise-reduced view of the water summary.
For more information please see: https://data.dea.ga.gov.au/WOfS/summary/v2.1.0/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of a dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["water_observations"]
},
"wcs_default_bands": ["count_wet"],
"styles": [
{
"name": "water_observations",
"title": "Wet Count",
"abstract": "WOfS summary showing the count of water observations",
"needed_bands": ["count_wet"],
"color_ramp": [
{
"value": 0,
"color": "#666666",
"alpha": 0
},
{
"value": 2,
"color": "#890000"
},
{
"value": 5,
"color": "#990000"
},
{
"value": 10,
"color": "#E38400"
},
{
"value": 25,
"color": "#E3DF00"
},
{
"value": 50,
"color": "#A6E300"
},
{
"value": 100,
"color": "#00E32D"
},
{
"value": 150,
"color": "#00E3C8"
},
{
"value": 200,
"color": "#0097E3"
},
{
"value": 250,
"color": "#005FE3"
},
{
"value": 300,
"color": "#000FE3"
},
{
"value": 350,
"color": "#000EA9"
},
{
"value": 400,
"color": "#5700E3",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 100
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "water_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS Statistics",
# Included as a keyword for the layer
"type": "Clear Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_summary_clear",
# The Datacube name for the associated data product
"product_name": "wofs_summary",
"abstract": """
Water Observations from Space (WOfS) Statistics is a set of statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products which help the understanding of surface water across Australia. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This layer contains Clear Count: how many times an area could be clearly seen (ie. not affected by clouds, shadows or other satellite observation problems). No clear observations causes an area to appear transparent, 1-300 total clear observations of water correlate with red and yellow colours, 400 clear observations correlates with light green, 800 clear observations and above correlates with dark green.
As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own. The confidence layer and filtered summary are contained in the Water Observations from Space Statistics Filtered Summary product, which provide a noise-reduced view of the water summary.
For more information please see: https://data.dea.ga.gov.au/WOfS/summary/v2.1.0/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of a dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["clear_observations"]
},
"wcs_default_bands": ["count_clear"],
"styles": [
{
"name": "clear_observations",
"title": "Clear Count",
"abstract": "WOfS summary showing the count of clear observations",
"needed_bands": ["count_clear"],
"color_ramp": [
{
"value": 0,
"color": "#FFFFFF",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this style
"value": 10,
"color": "#b21800",
"alpha": 1
},
{
"value": 100,
"color": "#ef8500"
},
{
"value": 200,
"color": "#ffb800"
},
{
"value": 300,
"color": "#ffd300"
},
{
"value": 400,
"color": "#ffe300"
},
{
"value": 500,
"color": "#fff300"
},
{
"value": 600,
"color": "#d0f800"
},
{
"value": 700,
"color": "#a0fd00"
},
{
"value": 800,
"color": "#6ee100"
},
{
"value": 901,
"color": "#39a500"
},
{
"value": 1000,
"color": "#026900",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 100,
"axes_position": [0.05, 0.5, 0.89, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "clear_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS Statistics",
# Included as a keyword for the layer
"type": "Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "Water Observations from Space Statistics",
# The Datacube name for the associated data product
"product_name": "wofs_summary",
"abstract": """
Water Observations from Space (WOfS) Statistics is a set of statistical summaries of the WOfS product which combines WOfS observations into summary products that help the understanding of surface water across Australia. WOfS Statistics is calculated from the full depth time series (1986 – 2018). The water detected for each location is summed through time and then compared to the number of clear observations of that location. The result is a percentage value of the number of times water was observed at the location. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time (water summary).
This layer contains the Water Summary: the percentage of clear observations which were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own. The confidence layer and filtered summary are contained in the Water Observations from Space Statistics Filtered Summary product, which provide a noise-reduced view of the water summary.
For more information please see: https://data.dea.ga.gov.au/WOfS/summary/v2.1.0/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["WOfS_frequency", "WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.002,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.005,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.01,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.02,
"color": "#e38400"
},
{
"value": 0.05,
"color": "#e3df00"
},
{
"value": 0.1,
"color": "#a6e300"
},
{
"value": 0.2,
"color": "#62e300"
},
{
"value": 0.3,
"color": "#00e32d"
},
{
"value": 0.4,
"color": "#00e384"
},
{
"value": 0.5,
"color": "#00e3c8"
},
{
"value": 0.6,
"color": "#00c5e3"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS Filtered Statistics",
# Included as a keyword for the layer
"type": "Confidence",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_filtered_summary_confidence",
# The Datacube name for the associated data product
"product_name": "wofs_filtered_summary",
"abstract": """
Water Observations from Space (WOfS) Filtered Statistics helps provide the long term understanding of the recurrence of water in the landscape, with much of the noise due to misclassification filtered out. WOfS Filtered Statistics consists of a Confidence layer that compares the WOfS Statistics water summary to other national water datasets, and the Filtered Water Summary which uses the Confidence to mask areas of the WOfS Statistics water summary where Confidence is low.
This layer is Confidence: the degree of agreement between water shown in the Water Summary and other national datasets. Areas where there is less than 1% confidence appear black, areas with between 1% and 10% confidence are styled between black and red, areas with 25% confidence are styled yellow, and areas with 75% confidence and above correspond to green.
The Confidence layer provides understanding of whether the water shown in the Water Summary agrees with where water should exist in the landscape, such as due to sloping land or whether water has been detected in a location by other means.
For more information please see: https://data.dea.ga.gov.au/WOfS/filtered_summary/v2.1.0/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["wofs_confidence"]
},
"wcs_default_bands": ["confidence"],
"styles": [
{
"name": "wofs_confidence",
"title": "Confidence",
"abstract": "WOfS Confidence",
"needed_bands": ["confidence"],
"color_ramp": [
{
"value": 0,
"color": "#000000",
},
{
"value": 0.01,
"color": "#000000"
},
{
"value": 0.02,
"color": "#990000"
},
{
"value": 0.05,
"color": "#CF2200"
},
{
"value": 0.1,
"color": "#E38400"
},
{
"value": 0.25,
"color": "#E3DF00"
},
{
"value": 0.5,
"color": "#A6E300"
},
{
"value": 0.75,
"color": "#62E300"
},
{
"value": 1.0,
"color": "#00E32D"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.25
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "wofs_confidence",
},
{
# Included as a keyword for the layer
"label": "WOfS Annual Statistics",
# Included as a keyword for the layer
"type": "Wet Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_annual_summary_wet",
# The Datacube name for the associated data product
"product_name": "wofs_annual_summary",
"abstract": """
Water Observations from Space - Annual Statistics is a set of annual statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - Annual Statistics, a set of annual statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Wet Count: how many times water was detected in observations that were clear. No clear observations of water causes an area to appear transparent, 1-50 total clear observations of water correlate with red and yellow colours, 100 clear observations of water correlate with green, 200 clear observations of water correlates with light blue, 300 clear observations of water correlates to deep blue and 400 and over observations of clear water correlate to purple.
For more information please see: https://data.dea.ga.gov.au/WOfS/annual_summary/v2.1.5/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["annual_water_observations"]
},
"wcs_default_bands": ["count_wet"],
"styles": [
{
"name": "annual_water_observations",
"title": "Wet Count",
"abstract": "WOfS annual summary showing the count of water observations",
"needed_bands": ["count_wet"],
"color_ramp": [
{
"value": 0,
"color": "#666666",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#990000",
"alpha": 1
},
{
"value": 2,
"color": "#990000"
},
{
"value": 4,
"color": "#E38400"
},
{
"value": 6,
"color": "#E3DF00"
},
{
"value": 8,
"color": "#00E32D"
},
{
"value": 10,
"color": "#00E3C8"
},
{
"value": 12,
"color": "#0097E3"
},
{
"value": 14,
"color": "#005FE3"
},
{
"value": 16,
"color": "#000FE3"
},
{
"value": 18,
"color": "#000EA9"
},
{
"value": 20,
"color": "#5700E3",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "annual_water_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS Annual Statistics",
# Included as a keyword for the layer
"type": "Clear Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_annual_summary_clear",
# The Datacube name for the associated data product
"product_name": "wofs_annual_summary",
"abstract": """
Water Observations from Space - Annual Statistics is a set of annual statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - Annual Statistics, a set of annual statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Clear Count: how many times an area could be clearly seen (ie. not affected by clouds, shadows or other satellite observation problems). No clear observations causes an area to appear transparent, 1-300 total clear observations of water correlate with red and yellow colours, 400 clear observations correlates with light green, 800 clear observations and above correlates with dark green.
For more information please see: https://data.dea.ga.gov.au/WOfS/annual_summary/v2.1.5/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["annual_clear_observations"]
},
"wcs_default_bands": ["count_clear"],
"styles": [
{
"name": "annual_clear_observations",
"title": "Clear Count",
"abstract": "WOfS annual summary showing the count of clear observations",
"needed_bands": ["count_clear"],
"color_ramp": [
{
"value": 0,
"color": "#FFFFFF",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#B21800",
"alpha": 1
},
{
"value": 1,
"color": "#B21800"
},
{
"value": 4,
"color": "#ef8500"
},
{
"value": 8,
"color": "#ffb800"
},
{
"value": 10,
"color": "#ffd000"
},
{
"value": 13,
"color": "#fff300"
},
{
"value": 16,
"color": "#fff300"
},
{
"value": 20,
"color": "#c1ec00"
},
{
"value": 24,
"color": "#6ee100"
},
{
"value": 28,
"color": "#39a500"
},
{
"value": 30,
"color": "#026900",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10,
"axes_position": [0.05, 0.5, 0.89, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "annual_clear_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS Annual Statistics",
# Included as a keyword for the layer
"type": "Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_annual_summary_statistics",
# The Datacube name for the associated data product
"product_name": "wofs_annual_summary",
"abstract": """
Water Observations from Space - Annual Statistics is a set of annual statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - Annual Statistics, a set of annual statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
For more information please see: https://data.dea.ga.gov.au/WOfS/annual_summary/v2.1.5/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["annual_WOfS_frequency",
"annual_WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "annual_WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS annual summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.02,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.05,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.1,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.2,
"color": "#e38400"
},
{
"value": 0.3,
"color": "#e3df00"
},
{
"value": 0.4,
"color": "#62e300"
},
{
"value": 0.5,
"color": "#00e32d"
},
{
"value": 0.6,
"color": "#00e3c8"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "annual_WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS annual summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "annual_WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS April - October Statistics",
# Included as a keyword for the layer
"type": "Wet Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_apr_oct_summary_wet",
# The Datacube name for the associated data product
"product_name": "wofs_apr_oct_summary",
"abstract": """
Water Observations from Space - April to October Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - April to October Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Wet Count: how many times water was detected in observations that were clear. No clear observations of water causes an area to appear transparent, 1-50 total clear observations of water correlate with red and yellow colours, 100 clear observations of water correlate with green, 200 clear observations of water correlates with light blue, 300 clear observations of water correlates to deep blue and 400 and over observations of clear water correlate to purple.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_water_observations"]
},
"wcs_default_bands": ["count_wet"],
"styles": [
{
"name": "seasonal_water_observations",
"title": "Wet Count",
"abstract": "WOfS seasonal summary showing the count of water observations",
"needed_bands": ["count_wet"],
"color_ramp": [
{
"value": 0,
"color": "#666666",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#990000",
"alpha": 1
},
{
"value": 2,
"color": "#990000"
},
{
"value": 4,
"color": "#E38400"
},
{
"value": 6,
"color": "#E3DF00"
},
{
"value": 8,
"color": "#00E32D"
},
{
"value": 10,
"color": "#00E3C8"
},
{
"value": 12,
"color": "#0097E3"
},
{
"value": 14,
"color": "#005FE3"
},
{
"value": 16,
"color": "#000FE3"
},
{
"value": 18,
"color": "#000EA9"
},
{
"value": 20,
"color": "#5700E3",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_water_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS April - October Summary Statistics",
# Included as a keyword for the layer
"type": "Clear Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_apr_oct_summary_clear",
# The Datacube name for the associated data product
"product_name": "wofs_apr_oct_summary",
"abstract": """
Water Observations from Space - April to October Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - April to October Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Clear Count: how many times an area could be clearly seen (ie. not affected by clouds, shadows or other satellite observation problems). No clear observations causes an area to appear transparent, 1-300 total clear observations of water correlate with red and yellow colours, 400 clear observations correlates with light green, 800 clear observations and above correlates with dark green.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_clear_observations"]
},
"wcs_default_bands": ["count_clear"],
"styles": [
{
"name": "seasonal_clear_observations",
"title": "Clear Count",
"abstract": "WOfS seasonal summary showing the count of clear observations",
"needed_bands": ["count_clear"],
"color_ramp": [
{
"value": 0,
"color": "#FFFFFF",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#B21800",
"alpha": 1
},
{
"value": 1,
"color": "#B21800"
},
{
"value": 4,
"color": "#ef8500"
},
{
"value": 8,
"color": "#ffb800"
},
{
"value": 10,
"color": "#ffd000"
},
{
"value": 13,
"color": "#fff300"
},
{
"value": 16,
"color": "#fff300"
},
{
"value": 20,
"color": "#c1ec00"
},
{
"value": 24,
"color": "#6ee100"
},
{
"value": 28,
"color": "#39a500"
},
{
"value": 30,
"color": "#026900",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10,
"axes_position": [0.05, 0.5, 0.89, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_clear_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS April - October Statistics",
# Included as a keyword for the layer
"type": "Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_apr_oct_summary_statistics",
# The Datacube name for the associated data product
"product_name": "wofs_apr_oct_summary",
"abstract": """
Water Observations from Space - April to October Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - April to October Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_WOfS_frequency",
"seasonal_WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "seasonal_WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.02,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.05,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.1,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.2,
"color": "#e38400"
},
{
"value": 0.3,
"color": "#e3df00"
},
{
"value": 0.4,
"color": "#62e300"
},
{
"value": 0.5,
"color": "#00e32d"
},
{
"value": 0.6,
"color": "#00e3c8"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "seasonal_WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS November - March Statistics",
# Included as a keyword for the layer
"type": "Wet Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_nov_mar_summary_wet",
# The Datacube name for the associated data product
"product_name": "wofs_nov_mar_summary",
"abstract": """
Water Observations from Space - November to March Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - November to March Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, 1-50 total clear observations of water correlate with red and yellow colours, 100 clear observations of water correlate with green, 200 clear observations of water correlates with light blue, 300 clear observations of water correlates to deep blue and 400 and over observations of clear water correlate to purple.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_water_observations"]
},
"wcs_default_bands": ["count_wet"],
"styles": [
{
"name": "seasonal_water_observations",
"title": "Wet Count",
"abstract": "WOfS seasonal summary showing the count of water observations",
"needed_bands": ["count_wet"],
"color_ramp": [
{
"value": 0,
"color": "#666666",
"alpha": 0
},
{
                        # purely for legend display
                        # we should not get fractional
                        # values in this style
"value": 0.2,
"color": "#990000",
"alpha": 1
},
{
"value": 2,
"color": "#990000"
},
{
"value": 4,
"color": "#E38400"
},
{
"value": 6,
"color": "#E3DF00"
},
{
"value": 8,
"color": "#00E32D"
},
{
"value": 10,
"color": "#00E3C8"
},
{
"value": 12,
"color": "#0097E3"
},
{
"value": 14,
"color": "#005FE3"
},
{
"value": 16,
"color": "#000FE3"
},
{
"value": 18,
"color": "#000EA9"
},
{
"value": 20,
"color": "#5700E3",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_water_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS November - March Summary Statistics",
# Included as a keyword for the layer
"type": "Clear Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_nov_mar_summary_clear",
# The Datacube name for the associated data product
"product_name": "wofs_nov_mar_summary",
"abstract": """
Water Observations from Space - November to March Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - November to March Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations causes an area to appear transparent, 1-300 total clear observations of water correlate with red and yellow colours, 400 clear observations correlates with light green, 800 clear observations and above correlates with dark green.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_clear_observations"]
},
"wcs_default_bands": ["count_clear"],
"styles": [
{
"name": "seasonal_clear_observations",
"title": "Clear Count",
"abstract": "WOfS seasonal summary showing the count of clear observations",
"needed_bands": ["count_clear"],
"color_ramp": [
{
"value": 0,
"color": "#FFFFFF",
"alpha": 0
},
{
                        # purely for legend display
                        # we should not get fractional
                        # values in this style
"value": 0.2,
"color": "#B21800",
"alpha": 1
},
{
"value": 1,
"color": "#B21800"
},
{
"value": 4,
"color": "#ef8500"
},
{
"value": 8,
"color": "#ffb800"
},
{
"value": 10,
"color": "#ffd000"
},
{
"value": 13,
"color": "#fff300"
},
{
"value": 16,
"color": "#fff300"
},
{
"value": 20,
"color": "#c1ec00"
},
{
"value": 24,
"color": "#6ee100"
},
{
"value": 28,
"color": "#39a500"
},
{
"value": 30,
"color": "#026900",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10,
"axes_position": [0.05, 0.5, 0.89, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_clear_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS November - March Statistics",
# Included as a keyword for the layer
"type": "Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_nov_mar_summary_statistics",
# The Datacube name for the associated data product
"product_name": "wofs_nov_mar_summary",
"abstract": """
Water Observations from Space - Seasonal Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - November to March Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_WOfS_frequency",
"seasonal_WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "seasonal_WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.02,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.05,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.1,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.2,
"color": "#e38400"
},
{
"value": 0.3,
"color": "#e3df00"
},
{
"value": 0.4,
"color": "#62e300"
},
{
"value": 0.5,
"color": "#00e32d"
},
{
"value": 0.6,
"color": "#00e3c8"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "seasonal_WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS Daily Observations",
# Included as a keyword for the layer
"type": "albers",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_albers",
# The Datacube name for the associated data product
"product_name": "wofs_albers",
"abstract": """
Water Observations from Space (WOfS) provides surface water observations derived from satellite imagery for all of Australia. The current product (Version 2.1.5) includes observations taken from 1986 to the present, from the Landsat 5, 7 and 8 satellites. WOfS covers all of mainland Australia and Tasmania but excludes off-shore Territories.
The WOfS product allows users to get a better understanding of where water is normally present in a landscape, where water is seldom observed, and where inundation has occurred occasionally.
Data is provided as Water Observation Feature Layers (WOFLs), in a 1 to 1 relationship with the input satellite data. Hence there is one WOFL for each satellite dataset processed for the occurrence of water. The details of the WOfS algorithm and derived statistics are available at http://dx.doi.org/10.1016/j.rse.2015.11.003.
For service status information, see https://status.dea.ga.gov.au""",
#"pq_band": "water",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [200, 180, 180, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] & data[band].attrs['nodata']) == 0,
# "pq_manual_merge": True,
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [
"nodata",
"noncontiguous",
],
# Include UTC dates for GSKY lookup
"feature_info_include_utc_dates": True,
"data_manual_merge": False,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
"fuse_func": "datacube_wms.wms_utils.wofls_fuser",
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"legend": {
"styles": ["observations"]
},
"wcs_default_bands": ["water"],
"styles": [
{
"name": "observations",
"title": "Observations",
"abstract": "Observations",
"value_map": {
"water": [
{
"title": "Invalid",
"abstract": "Slope or Cloud",
"flags": {
"or": {
"terrain_or_low_angle": True,
"cloud_shadow": True,
"cloud": True,
"high_slope": True,
"noncontiguous": True
}
},
"color": "#707070"
},
{
# Possible Sea Glint, also mark as invalid
"title": "",
"abstract": "",
"flags": {
"dry": True,
"sea": True
},
"color": "#707070"
},
{
"title": "Dry",
"abstract": "Dry",
"flags": {
"dry": True,
"sea": False,
},
"color": "#D99694"
},
{
"title": "Wet",
"abstract": "Wet or Sea",
"flags": {
"or": {
"wet": True,
"sea": True
}
},
"color": "#4F81BD"
}
]
}
},
{
"name": "wet",
"title": "Wet Only",
"abstract": "Wet Only",
"value_map": {
"water": [
{
"title": "Invalid",
"abstract": "Slope or Cloud",
"flags": {
"or": {
"terrain_or_low_angle": True,
"cloud_shadow": True,
"cloud": True,
"high_slope": True,
"noncontiguous": True
}
},
"color": "#707070",
"mask": True
},
{
# Possible Sea Glint, also mark as invalid
"title": "",
"abstract": "",
"flags": {
"dry": True,
"sea": True
},
"color": "#707070",
"mask": True
},
{
"title": "Dry",
"abstract": "Dry",
"flags": {
"dry": True,
"sea": False,
},
"color": "#D99694",
"mask": True
},
{
"title": "Wet",
"abstract": "Wet or Sea",
"flags": {
"or": {
"wet": True,
"sea": True
}
},
"color": "#4F81BD"
}
]
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "observations",
}
],
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "Sentinel-2 NRT",
"title": "Near Real-Time",
"abstract": "This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. "
"The Near Real-Time capability provides analysis-ready data "
"that is processed on receipt using the best-available ancillary information at the time to "
"provide atmospheric corrections. For more information see "
"http://pid.geoscience.gov.au/dataset/ga/122229",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Sentinel 2 (A and B combined)",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "Surface Reflectance",
"abstract":"""
This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. The Near Real-Time capability provides analysis-ready data that is processed on receipt using the best-available ancillary information at the time to provide atmospheric corrections.
For more information see http://pid.geoscience.gov.au/dataset/ga/122229
The Normalised Difference Chlorophyll Index (NDCI) is based on the method of Mishra & Mishra 2012, and adapted to bands on the Sentinel-2A & B sensors.
The index indicates levels of chlorophyll-a (chl-a) concentrations in complex turbid productive waters such as those encountered in many inland water bodies. The index has not been validated in Australian waters, and there are a range of environmental conditions that may have an effect on the accuracy of the derived index values in this test implementation, including:
- Influence on the remote sensing signal from nearby land and/or atmospheric effects
- Optically shallow water
- Cloud cover
Mishra, S., Mishra, D.R., 2012. Normalized difference chlorophyll index: A novel model for remote estimation of chlorophyll-a concentration in turbid productive waters. Remote Sensing of Environment, Remote Sensing of Urban Environments 117, 394–406. https://doi.org/10.1016/j.rse.2011.10.016
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "s2_nrt_granule_nbar_t",
# The Datacube name for the associated data product
"multi_product": True,
"product_name": ["s2a_nrt_granule", "s2b_nrt_granule"],
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_nrt_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi", "ndci"]
},
"wcs_default_bands": ["nbart_red", "nbart_green", "nbart_blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nbart_nir_1"] - data["nbart_red"]) / (data["nbart_nir_1"] + data["nbart_red"]),
"needed_bands": ["nbart_red", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, NIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["nbart_green"] - data["nbart_nir_1"]) / (
data["nbart_nir_1"] + data["nbart_green"]),
"needed_bands": ["nbart_green", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "ndci",
"title": "NDCI - Red Edge, Red",
"abstract": "Normalised Difference Chlorophyll Index - a derived index that correlates well with the existence of chlorophyll",
"index_function": lambda data: (data["nbart_red_edge_1"] - data["nbart_red"]) / (data["nbart_red_edge_1"] + data["nbart_red"]).where(((data["nbart_green"] - data["nbart_swir_3"]) / (data["nbart_green"] + data["nbart_swir_3"])) > 0.1),
"needed_bands": ["nbart_red_edge_1", "nbart_red", "nbart_green", "nbart_swir_3"],
"color_ramp": [
{
"value": -0.1,
"color": "#1696FF",
"legend": {
"prefix" : "<"
}
},
{
"value": -0.1,
"color": "#1696FF"
},
{
"value": 0.0,
"color": "#00FFDF",
"legend": { }
},
{
"value": 0.1,
"color": "#FFF50E",
},
{
"value": 0.2,
"color": "#FFB50A",
"legend": { }
},
{
"value": 0.4,
"color": "#FF530D",
},
{
"value": 0.5,
"color": "#FF0000",
"legend": {
"prefix": ">"
}
}
]
},
{
"name": "aerosol",
"title": "Narrow Blue - 440",
"abstract": "Coastal Aerosol or Narrow Blue band, approximately 435nm to 450nm",
"components": {
"red": {
"nbart_coastal_aerosol": 1.0
},
"green": {
"nbart_coastal_aerosol": 1.0
},
"blue": {
"nbart_coastal_aerosol": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"nbart_blue": 1.0
},
"green": {
"nbart_blue": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"nbart_green": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 670",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_red": 1.0
},
"blue": {
"nbart_red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_1",
"title": "Vegetation Red Edge - 710",
"abstract": "Near infra-red band, centred on 710nm",
"components": {
"red": {
"nbart_red_edge_1": 1.0
},
"green": {
"nbart_red_edge_1": 1.0
},
"blue": {
"nbart_red_edge_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_2",
"title": "Vegetation Red Edge - 740",
"abstract": "Near infra-red band, centred on 740nm",
"components": {
"red": {
"nbart_red_edge_2": 1.0
},
"green": {
"nbart_red_edge_2": 1.0
},
"blue": {
"nbart_red_edge_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_3",
"title": "Vegetation Red Edge - 780",
"abstract": "Near infra-red band, centred on 780nm",
"components": {
"red": {
"nbart_red_edge_3": 1.0
},
"green": {
"nbart_red_edge_3": 1.0
},
"blue": {
"nbart_red_edge_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nbart_nir_1": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_nir_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "narrow_nir",
"title": "Narrow Near Infrared - 870",
"abstract": "Near infra-red band, centred on 865nm",
"components": {
"red": {
"nbart_nir_2": 1.0
},
"green": {
"nbart_nir_2": 1.0
},
"blue": {
"nbart_nir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_swir_2": 1.0
},
"blue": {
"nbart_swir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2190",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"nbart_swir_3": 1.0
},
"green": {
"nbart_swir_3": 1.0
},
"blue": {
"nbart_swir_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Sentinel 2B",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "Surface Reflectance",
"abstract":"""
This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. The Near Real-Time capability provides analysis-ready data that is processed on receipt using the best-available ancillary information at the time to provide atmospheric corrections.
For more information see http://pid.geoscience.gov.au/dataset/ga/122229
The Normalised Difference Chlorophyll Index (NDCI) is based on the method of Mishra & Mishra 2012, and adapted to bands on the Sentinel-2A & B sensors.
The index indicates levels of chlorophyll-a (chl-a) concentrations in complex turbid productive waters such as those encountered in many inland water bodies. The index has not been validated in Australian waters, and there are a range of environmental conditions that may have an effect on the accuracy of the derived index values in this test implementation, including:
- Influence on the remote sensing signal from nearby land and/or atmospheric effects
- Optically shallow water
- Cloud cover
Mishra, S., Mishra, D.R., 2012. Normalized difference chlorophyll index: A novel model for remote estimation of chlorophyll-a concentration in turbid productive waters. Remote Sensing of Environment, Remote Sensing of Urban Environments 117, 394–406. https://doi.org/10.1016/j.rse.2011.10.016
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "s2b_nrt_granule_nbar_t",
# The Datacube name for the associated data product
"product_name": "s2b_nrt_granule",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_nrt_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
        # Time Zone. In hours added to UTC (may be negative)
        # Used for rounding off scene times to a date.
        # 9 is a good value for imagery of Australia.
"time_zone": 9,
        # Extent mask function
        # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi", "ndci"]
},
"wcs_default_bands": ["nbart_red", "nbart_green", "nbart_blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nbart_nir_1"] - data["nbart_red"]) / (data["nbart_nir_1"] + data["nbart_red"]),
"needed_bands": ["nbart_red", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, NIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["nbart_green"] - data["nbart_nir_1"]) / (
data["nbart_nir_1"] + data["nbart_green"]),
"needed_bands": ["nbart_green", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "ndci",
"title": "NDCI - Red Edge, Red",
"abstract": "Normalised Difference Chlorophyll Index - a derived index that correlates well with the existence of chlorophyll",
"index_function": lambda data: (data["nbart_red_edge_1"] - data["nbart_red"]) / (data["nbart_red_edge_1"] + data["nbart_red"]).where(((data["nbart_green"] - data["nbart_swir_3"]) / (data["nbart_green"] + data["nbart_swir_3"])) > 0.1),
"needed_bands": ["nbart_red_edge_1", "nbart_red", "nbart_green", "nbart_swir_3"],
"color_ramp": [
{
"value": -0.1,
"color": "#1696FF",
"legend": {
"prefix" : "<"
}
},
{
"value": -0.1,
"color": "#1696FF"
},
{
"value": 0.0,
"color": "#00FFDF",
"legend": { }
},
{
"value": 0.1,
"color": "#FFF50E",
},
{
"value": 0.2,
"color": "#FFB50A",
"legend": { }
},
{
"value": 0.4,
"color": "#FF530D",
},
{
"value": 0.5,
"color": "#FF0000",
"legend": {
"prefix": ">"
}
}
]
},
{
"name": "aerosol",
"title": "Narrow Blue - 440",
"abstract": "Coastal Aerosol or Narrow Blue band, approximately 435nm to 450nm",
"components": {
"red": {
"nbart_coastal_aerosol": 1.0
},
"green": {
"nbart_coastal_aerosol": 1.0
},
"blue": {
"nbart_coastal_aerosol": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"nbart_blue": 1.0
},
"green": {
"nbart_blue": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"nbart_green": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 670",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_red": 1.0
},
"blue": {
"nbart_red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_1",
"title": "Vegetation Red Edge - 710",
"abstract": "Near infra-red band, centred on 710nm",
"components": {
"red": {
"nbart_red_edge_1": 1.0
},
"green": {
"nbart_red_edge_1": 1.0
},
"blue": {
"nbart_red_edge_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_2",
"title": "Vegetation Red Edge - 740",
"abstract": "Near infra-red band, centred on 740nm",
"components": {
"red": {
"nbart_red_edge_2": 1.0
},
"green": {
"nbart_red_edge_2": 1.0
},
"blue": {
"nbart_red_edge_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_3",
"title": "Vegetation Red Edge - 780",
"abstract": "Near infra-red band, centred on 780nm",
"components": {
"red": {
"nbart_red_edge_3": 1.0
},
"green": {
"nbart_red_edge_3": 1.0
},
"blue": {
"nbart_red_edge_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nbart_nir_1": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_nir_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "narrow_nir",
"title": "Narrow Near Infrared - 870",
"abstract": "Near infra-red band, centred on 865nm",
"components": {
"red": {
"nbart_nir_2": 1.0
},
"green": {
"nbart_nir_2": 1.0
},
"blue": {
"nbart_nir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_swir_2": 1.0
},
"blue": {
"nbart_swir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2190",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"nbart_swir_3": 1.0
},
"green": {
"nbart_swir_3": 1.0
},
"blue": {
"nbart_swir_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Sentinel 2A",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "Surface Reflectance",
"abstract": """
This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. The Near Real-Time capability provides analysis-ready data that is processed on receipt using the best-available ancillary information at the time to provide atmospheric corrections.
For more information see http://pid.geoscience.gov.au/dataset/ga/122229
The Normalised Difference Chlorophyll Index (NDCI) is based on the method of Mishra & Mishra 2012, and adapted to bands on the Sentinel-2A & B sensors.
The index indicates levels of chlorophyll-a (chl-a) concentrations in complex turbid productive waters such as those encountered in many inland water bodies. The index has not been validated in Australian waters, and there are a range of environmental conditions that may have an effect on the accuracy of the derived index values in this test implementation, including:
- Influence on the remote sensing signal from nearby land and/or atmospheric effects
- Optically shallow water
- Cloud cover
Mishra, S., Mishra, D.R., 2012. Normalized difference chlorophyll index: A novel model for remote estimation of chlorophyll-a concentration in turbid productive waters. Remote Sensing of Environment, Remote Sensing of Urban Environments 117, 394–406. https://doi.org/10.1016/j.rse.2011.10.016
For service status information, see https://status.dea.ga.gov.au""",
# The WMS name for the layer
"name": "s2a_nrt_granule_nbar_t",
# The Datacube name for the associated data product
"product_name": "s2a_nrt_granule",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_nrt_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone.  In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi", "ndci"]
},
"wcs_default_bands": ["nbart_red", "nbart_green", "nbart_blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nbart_nir_1"] - data["nbart_red"]) / (
data["nbart_nir_1"] + data["nbart_red"]),
"needed_bands": ["nbart_red", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, NIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["nbart_green"] - data["nbart_nir_1"]) / (
data["nbart_nir_1"] + data["nbart_green"]),
"needed_bands": ["nbart_green", "nbart_nir_1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
{
"name": "ndci",
"title": "NDCI - Red Edge, Red",
"abstract": "Normalised Difference Chlorophyll Index - a derived index that correlates well with the existence of chlorophyll",
"index_function": lambda data: (data["nbart_red_edge_1"] - data["nbart_red"]) / (data["nbart_red_edge_1"] + data["nbart_red"]).where(((data["nbart_green"] - data["nbart_swir_3"]) / (data["nbart_green"] + data["nbart_swir_3"])) > 0.1),
"needed_bands": ["nbart_red_edge_1", "nbart_red", "nbart_green", "nbart_swir_3"],
"color_ramp": [
{
"value": -0.1,
"color": "#1696FF",
"legend": {
"prefix" : "<"
}
},
{
"value": -0.1,
"color": "#1696FF"
},
{
"value": 0.0,
"color": "#00FFDF",
"legend": { }
},
{
"value": 0.1,
"color": "#FFF50E",
},
{
"value": 0.2,
"color": "#FFB50A",
"legend": { }
},
{
"value": 0.4,
"color": "#FF530D",
},
{
"value": 0.5,
"color": "#FF0000",
"legend": {
"prefix": ">"
}
}
]
},
{
"name": "aerosol",
"title": "Narrow Blue - 440",
"abstract": "Coastal Aerosol or Narrow Blue band, approximately 435nm to 450nm",
"components": {
"red": {
"nbart_coastal_aerosol": 1.0
},
"green": {
"nbart_coastal_aerosol": 1.0
},
"blue": {
"nbart_coastal_aerosol": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Blue - 490",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"nbart_blue": 1.0
},
"green": {
"nbart_blue": 1.0
},
"blue": {
"nbart_blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Green - 560",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"nbart_green": 1.0
},
"green": {
"nbart_green": 1.0
},
"blue": {
"nbart_green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Red - 670",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"nbart_red": 1.0
},
"green": {
"nbart_red": 1.0
},
"blue": {
"nbart_red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_1",
"title": "Vegetation Red Edge - 710",
"abstract": "Near infra-red band, centred on 710nm",
"components": {
"red": {
"nbart_red_edge_1": 1.0
},
"green": {
"nbart_red_edge_1": 1.0
},
"blue": {
"nbart_red_edge_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_2",
"title": "Vegetation Red Edge - 740",
"abstract": "Near infra-red band, centred on 740nm",
"components": {
"red": {
"nbart_red_edge_2": 1.0
},
"green": {
"nbart_red_edge_2": 1.0
},
"blue": {
"nbart_red_edge_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red_edge_3",
"title": "Vegetation Red Edge - 780",
"abstract": "Near infra-red band, centred on 780nm",
"components": {
"red": {
"nbart_red_edge_3": 1.0
},
"green": {
"nbart_red_edge_3": 1.0
},
"blue": {
"nbart_red_edge_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nbart_nir_1": 1.0
},
"green": {
"nbart_nir_1": 1.0
},
"blue": {
"nbart_nir_1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "narrow_nir",
"title": "Narrow Near Infrared - 870",
"abstract": "Near infra-red band, centred on 865nm",
"components": {
"red": {
"nbart_nir_2": 1.0
},
"green": {
"nbart_nir_2": 1.0
},
"blue": {
"nbart_nir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1610",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"nbart_swir_2": 1.0
},
"green": {
"nbart_swir_2": 1.0
},
"blue": {
"nbart_swir_2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2190",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"nbart_swir_3": 1.0
},
"green": {
"nbart_swir_3": 1.0
},
"blue": {
"nbart_swir_3": 1.0
}
},
"scale_range": [0.0, 3000.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
],
},
{
"name": "multi_scale_topographic_position",
"title": "Multi-Scale Topographic Position",
"abstract": "",
"products": [
{
"label": "Multi-Scale Topographic Position",
"abstract": """A Multi-scale topographic position image of Australia has been generated by combining
a topographic position index and topographic ruggedness. Topographic Position Index (TPI) measures
the topographic slope position of landforms. Ruggedness informs on the roughness of the surface and
is calculated as the standard deviation of elevations. Both these terrain attributes are therefore
scale dependent and will vary according to the size of the analysis window. Based on an algorithm
developed by Lindsay et al. (2015) we have generated multi-scale topographic position model over the
Australian continent using 3 second resolution (~90m) DEM derived from the Shuttle Radar Topography
Mission (SRTM). The algorithm calculates topographic position scaled by the corresponding ruggedness
across three spatial scales (window sizes) of 0.2-8.1 Km; 8.2-65.2 Km and 65.6-147.6 Km. The derived
ternary image captures variations in topographic position across these spatial scales (blue local,
green intermediate and red regional) and gives a rich representation of nested landform features that
have broad application in understanding geomorphological and hydrological processes and in mapping
regolith and soils over the Australian continent. Lindsay, J, B., Cockburn, J.M.H. and Russell,
H.A.J. 2015. An integral image approach to performing multi-scale topographic position analysis,
Geomorphology 245, 51–61.
For service status information, see https://status.dea.ga.gov.au""",
"type": "1 degree tile",
"variant": "",
"name": "multi_scale_topographic_position",
"product_name": "multi_scale_topographic_position",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["regional", "intermediate", "local"],
"apply_solar_corrections": False,
"legend": {
"url": "https://data.dea.ga.gov.au/multi-scale-topographic-position/mstp_legend.png",
# "styles": ["mstp_rgb"]
},
"wcs_default_bands": ["regional", "intermediate", "local"],
"styles": [
{
"name": "mstp_rgb",
"title": "Multi-scale Topographic Position",
"abstract": "red regional, green intermediate and blue local",
"components": {
"red": {
"regional": 1.0
},
"green": {
"intermediate": 1.0
},
"blue": {
"local": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 255.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "mstp_rgb",
},
]
},
{
"name": "weathering_intensity",
"title": "Weathering Intensity",
"abstract": "",
"products": [
{
"label": "Weathering Intensity",
"abstract": "Weathering intensity or the degree of weathering is an important characteristic of the "
"earth’s surface that has a significant influence on the chemical and physical properties "
"of surface materials. Weathering intensity largely controls the degree to which primary "
"minerals are altered to secondary components including clay minerals and oxides. The "
"degree of surface weathering is particularly important in Australia where variations in "
"weathering intensity correspond to the nature and distribution of regolith (weathered "
"bedrock and sediments) which mantles approximately 90% of the Australian continent. The "
"weathering intensity prediction has been generated using the Random Forest decision tree "
"machine learning algorithm. The algorithm is used to establish predictive relationships "
"between field estimates of the degree of weathering and a comprehensive suite of "
"covariate or predictive datasets. The covariates used to generate the model include "
"satellite imagery, terrain attributes, airborne radiometric imagery and mapped geology. "
"Correlations between the training dataset and the covariates were explored through the "
"generation of 300 random tree models. An r-squared correlation of 0.85 is reported using "
"5 K-fold cross-validation. The mean of the 300 models is used for predicting the "
"weathering intensity and the uncertainty in the weathering intensity is estimated at "
"each location via the standard deviation in the 300 model values. The predictive "
"weathering intensity model is an estimate of the degree of surface weathering only. The "
"interpretation of the weathering intensity is different for in-situ or residual "
"landscapes compared with transported materials within depositional landscapes. In "
"residual landscapes, weathering process are operating locally whereas in depositional "
"landscapes the model is reflecting the degree of weathering either prior to erosion and "
"subsequent deposition, or weathering of sediments after being deposited. The weathering "
"intensity model has broad utility in assisting mineral exploration in variably weathered "
"geochemical landscapes across the Australian continent, mapping chemical and physical "
"attributes of soils in agricultural landscapes and in understanding the nature and "
                        "distribution of weathering processes occurring within the upper regolith.\n"
"For service status information, see https://status.dea.ga.gov.au",
"type": "1 degree tile",
"variant": "",
"name": "weathering_intensity",
"product_name": "weathering_intensity",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["intensity"],
"apply_solar_corrections": False,
"legend": {
"styles": ["wii"]
},
"wcs_default_bands": ["intensity"],
"styles": [
{
"name": "wii",
"title": "Weathering Intensity",
                    "abstract": "Weathering Intensity Index (0-6)",
"needed_bands": ["intensity"],
"color_ramp": [
{
'value': 0,
'color': '#ffffff',
'alpha': 0
},
{
'value': 1,
'color': '#2972a8',
'legend': {
'label': 'Low\nClass 1'
}
},
{
'value': 3.5,
'color': '#fcf24b'
},
{
'value': 6,
'color': '#a02406',
'legend': {
'label': 'High\nClass 6'
}
}
],
"legend": {
"axes_position": [0.1, 0.5, 0.8, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "wii",
},
]
},
{
"name": "fcp_green_veg",
"title": "Fractional Cover Percentiles - Green Vegetation",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Green Vegetation",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program, for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
This contains the percentage of green vegetation per pixel at the 10th, 50th (median) and 90th percentiles for observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_green_veg",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
"legend": {
"styles": ["green_veg_10"]
},
"wcs_default_bands": ["PV_PC_10", "PV_PC_50", "PV_PC_90"],
"styles": [
{
"name": "green_veg_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc',
'legend': {}
},
{
'value': 25,
'color': '#c2e699',
'legend': {}
},
{
'value': 50,
'color': '#78c679',
'legend': {}
},
{
'value': 75,
'color': '#31a354',
'legend': {}
},
{
'value': 100,
'color': '#006837',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Green Vegetation",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "green_veg_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "green_veg_10",
},
]
},
{
"name": "fcp_non_green_veg",
"title": "Fractional Cover Percentiles - Non Green Vegetation",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Non Green Vegetation",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program, for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
This contains the percentage of non-green vegetation per pixel at the 10th, 50th (median) and 90th percentiles for observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_non_green_veg",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["non_green_veg_10"]
},
"wcs_default_bands": ["NPV_PC_10", "NPV_PC_50", "NPV_PC_90"],
"styles": [
{
"name": "non_green_veg_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4',
'legend': {}
},
{
'value': 25,
'color': '#fed98e',
'legend': {}
},
{
'value': 50,
'color': '#fe9929',
'legend': {}
},
{
'value': 75,
'color': '#d95f0e',
'legend': {}
},
{
'value': 100,
'color': '#993404',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Non-Green Vegetation",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "non_green_veg_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "non_green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "non_green_veg_10",
},
]
},
{
"name": "fcp_bare_soil",
"title": "Fractional Cover Percentiles - Bare Soil",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Bare Soil",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program, for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
This contains the percentage of bare soil per pixel at the 10th, 50th (median) and 90th percentiles for observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_bare_ground",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["bare_ground_10"]
},
"wcs_default_bands": ["BS_PC_10", "BS_PC_50", "BS_PC_90"],
"styles": [
{
"name": "bare_ground_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Bare Soil",
"needed_bands": ["BS_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2',
'legend': {}
},
{
'value': 25,
'color': '#fbb4b9',
'legend': {}
},
{
'value': 50,
'color': '#f768a1',
'legend': {}
},
{
'value': 75,
'color': '#c51b8a',
'legend': {}
},
{
'value': 100,
'color': '#7a0177',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Bare Soil",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "bare_ground_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Bare Soil",
"needed_bands": ["BS_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "bare_ground_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Bare Soil",
"needed_bands": ["BS_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "bare_ground_10",
},
]
},
{
"name": "fcp_rgb",
"title": "Fractional Cover Percentiles - Median",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Median",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program.
This contains a three band combination of the 50th Percentile for green vegetation, non green vegetation and bare soil observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_rgb",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"url": "https://data.dea.ga.gov.au/fractional-cover/fc-percentile/annual/v2.1.0/fcp_legend.png",
},
"wcs_default_bands": ["BS_PC_50", "PV_PC_50", "NPV_PC_50"],
"styles": [
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"BS_PC_50": 1.0
},
"green": {
"PV_PC_50": 1.0
},
"blue": {
"NPV_PC_50": 1.0
}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
]
},
{
"name": "fcp_seasonal",
"title": "Fractional Cover Percentiles Seasonal",
"abstract": "",
"products": [
{
"label": "Green Vegetation",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program, for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
FC-PERCENTILE-SEASONAL-SUMMARY, this contains a (10th, 50th and 90th percentile) of BS, PV and NPV of observations acquired within each calendar season (DJF, MAM, JJA, SON). This product is available for the most recent 8 seasons
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_seasonal_green_veg",
"product_name": "fc_percentile_albers_seasonal",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
"legend": {
"styles": ["green_veg_10"]
},
"wcs_default_bands": ["PV_PC_10", "PV_PC_50", "PV_PC_90"],
"styles": [
{
"name": "green_veg_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc',
'legend': {}
},
{
'value': 25,
'color': '#c2e699',
'legend': {}
},
{
'value': 50,
'color': '#78c679',
'legend': {}
},
{
'value': 75,
'color': '#31a354',
'legend': {}
},
{
'value': 100,
'color': '#006837',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Green Vegetation",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "green_veg_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "green_veg_10",
},
{
"label": "Non Green Vegetation",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program, for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
FC-PERCENTILE-SEASONAL-SUMMARY, this contains a (10th, 50th and 90th percentile) of BS, PV and NPV of observations acquired within each calendar season (DJF, MAM, JJA, SON). This product is available for the most recent 8 seasons
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_seasonal_non_green_veg",
"product_name": "fc_percentile_albers_seasonal",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["non_green_veg_10"]
},
"wcs_default_bands": ["NPV_PC_10", "NPV_PC_50", "NPV_PC_90"],
"styles": [
{
"name": "non_green_veg_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4',
'legend': {}
},
{
'value': 25,
'color': '#fed98e',
'legend': {}
},
{
'value': 50,
'color': '#fe9929',
'legend': {}
},
{
'value': 75,
'color': '#d95f0e',
'legend': {}
},
{
'value': 100,
'color': '#993404',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Non-Green Vegetation",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "non_green_veg_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "non_green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "non_green_veg_10",
},
{
"label": "Bare Soil",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program for more information please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
FC-PERCENTILE-SEASONAL-SUMMARY, this contains a (10th, 50th and 90th percentile) of BS, PV and NPV of observations acquired within each calendar season (DJF, MAM, JJA, SON). This product is available for the most recent 8 seasons
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_seasonal_bare_ground",
"product_name": "fc_percentile_albers_seasonal",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["bare_ground_10"]
},
"wcs_default_bands": ["BS_PC_10", "BS_PC_50", "BS_PC_90"],
"styles": [
{
"name": "bare_ground_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Bare Soil",
"needed_bands": ["BS_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2',
'legend': {}
},
{
'value': 25,
'color': '#fbb4b9',
'legend': {}
},
{
'value': 50,
'color': '#f768a1',
'legend': {}
},
{
'value': 75,
'color': '#c51b8a',
'legend': {}
},
{
'value': 100,
'color': '#7a0177',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Bare Soil",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "bare_ground_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Bare Soil",
"needed_bands": ["BS_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "bare_ground_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Bare Soil",
"needed_bands": ["BS_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "bare_ground_10",
},
{
"label": "Median",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program.
FC-PERCENTILE-SEASONAL-SUMMARY, this contains a (10th, 50th and 90th percentile) of BS, PV and NPV of observations acquired within each calendar season (DJF, MAM, JJA, SON). This product is available for the most recent 8 seasons
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_seasonal_rgb",
"product_name": "fc_percentile_albers_seasonal",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"url": "https://data.dea.ga.gov.au/fractional-cover/fc-percentile/annual/v2.1.0/fcp_legend.png",
},
"wcs_default_bands": ["BS_PC_50", "PV_PC_50", "NPV_PC_50"],
"styles": [
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"BS_PC_50": 1.0
},
"green": {
"PV_PC_50": 1.0
},
"blue": {
"NPV_PC_50": 1.0
}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
]
},
{
"name": "nidem",
"title": "National Intertidal Digital Elevation Model",
"abstract": "",
"products": [
{
"label": "NIDEM 25m",
"abstract": "The National Intertidal Digital Elevation Model (NIDEM) product is a continental-scale "
"dataset providing continuous elevation data for Australia's exposed intertidal zone. "
"NIDEM provides the first three-dimensional representation of Australia's intertidal zone "
"(excluding off-shore Territories and intertidal mangroves) at 25 m spatial resolution, "
"addressing a key gap between the availability of sub-tidal bathymetry and "
"terrestrial elevation data. NIDEM was generated by combining global tidal modelling "
"with a 30-year time series archive of spatially and spectrally calibrated "
"Landsat satellite data managed within the Digital Earth Australia (DEA) platform. "
"NIDEM complements existing intertidal extent products, and provides data to support a "
"new suite of use cases that require a more detailed understanding of the three-dimensional "
"topography of the intertidal zone, such as hydrodynamic modelling, coastal risk management "
"and ecological habitat mapping. "
"For service status information, see https://status.dea.ga.gov.au",
"type": "grid",
"variant": "nidem_v1.0.0",
"name": "NIDEM",
"product_name": "nidem",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["nidem"],
"apply_solar_corrections": False,
"legend": {
"styles": ["NIDEM"]
},
"wcs_default_bands": ["nidem"],
"styles": [
{
"name": "NIDEM",
"title": "National Intertidal Digital Elevation Model",
"abstract": "National Intertidal Digital Elevation Model 25 m v1.0.0",
"needed_bands": ["nidem"],
"color_ramp": [
{
'value': -2.51,
'color': '#440154'
},
{
'value': -2.5,
'color': '#440154',
'legend': {
"prefix": "<"
}
},
{
'value': -2.34,
'color': '#460e61',
},
{
'value': -2.18,
'color': '#471b6e',
},
{
'value': -2.02,
'color': '#472877'
},
{
'value': -1.86,
'color': '#45347f'
},
{
'value': -1.7,
'color': '#413f85'
},
{
'value': -1.58,
'color': '#3b4d8a'
},
{
'value': -1.42,
'color': '#37578b'
},
{
'value': -1.26,
'color': '#32618c'
},
{
'value': -1.1,
'color': '#2e6b8d',
"legend": {}
},
{
'value': -0.94,
'color': '#2a748e'
},
{
'value': -0.78,
'color': '#267d8e'
},
{
'value': -0.62,
'color': '#23868d'
},
{
'value': -0.46,
'color': '#208f8c'
},
{
'value': -0.3,
'color': '#1e9889'
},
{
'value': -0.14,
'color': '#1fa186'
},
{
'value': 0.0,
'color': '#26ac7f',
"legend": { }
},
{
'value': 0.14,
'color': '#32b579'
},
{
'value': 0.3,
'color': '#41bd70'
},
{
'value': 0.46,
'color': '#54c566'
},
{
'value': 0.62,
'color': '#69cc59'
},
{
'value': 0.78,
'color': '#80d24b'
},
{
'value': 0.94,
'color': '#99d83c'
},
{
'value': 1.1,
'color': '#b2dc2c',
},
{
'value': 1.26,
'color': '#cce01e'
},
{
'value': 1.42,
'color': '#e5e31a'
},
{
'value': 1.5,
'color': '#fde724',
'legend': {
"prefix": ">"
}
}
],
"legend": {
"units": "metres"
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "NIDEM",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "HLTC Composites",
"title": "High Tide Low Tide Composite",
"abstract": "The High and Low Tide Composites product is composed of two surface reflectance composite mosaics "
"of Landsat TM and ETM+ (Landsat 5 and Landsat 7 respectively) and OLI (Landsat 8) "
"surface reflectance data (Li et al., 2012). These products have been produced using "
"Digital Earth Australia (DEA). The two mosaics allow cloud free and noise reduced visualisation "
"of the shallow water and inter-tidal coastal regions of Australia, as observed at "
"high and low tide respectively. The composites are generated utilising the geomedian approach of "
"Roberts et al (2017) to ensure a valid surface reflectance spectra suitable for uses such as "
"habitat mapping. The time range used for composite generation in each polygon of the mosaic is "
"tailored to ensure dynamic coastal features are captured whilst still allowing a clean and cloud "
"free composite to be generated. The concepts of the Observed Tidal Range (OTR), "
"and Highest and Lowest Observed Tide (HOT, LOT) are discussed and described fully in Sagar et al. "
"(2017) and the product description for the ITEM v 1.0 product (Geoscience Australia, 2016).",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "High Tide",
"abstract":"""
High Tide and Low Tide Composites 2.0.0
The High and Low Tide Composites product is composed of two surface reflectance composite mosaics of Landsat TM and ETM+ (Landsat 5 and Landsat 7 respectively) and OLI (Landsat 8) surface reflectance data (Li et al., 2012). These products have been produced using Digital Earth Australia (DEA).
The two mosaics allow cloud free and noise reduced visualisation of the shallow water and inter-tidal coastal regions of Australia, as observed at high and low tide respectively (Sagar et al. 2018).
The composites are generated utilising the geomedian approach of Roberts et al (2017) to ensure a valid surface reflectance spectra suitable for uses such as habitat mapping.
The time range used for composite generation in each polygon of the mosaic is tailored to ensure dynamic coastal features are captured whilst still allowing a clean and cloud free composite to be generated. The concepts of the Observed Tidal Range (OTR), and Highest and Lowest Observed Tide (HOT, LOT) are discussed and described fully in Sagar et al. (2017) and the product description for the ITEM v 1.0 product (Geoscience Australia, 2016).
*Overview*
Inter-tidal zones are difficult regions to characterise due to the dynamic nature of the tide. They are highly changeable environments, subject to forcings from the land, sea and atmosphere and yet they form critical habitats for a wide range of organisms from birds to fish and sea grass.
By harnessing the long archive of satellite imagery over Australia's coastal zones in the DEA and pairing the images with regional tidal modelling, the archive can be sorted by tide height rather than date, enabling the inter-tidal zone to be viewed at any stage of the tide regime.
The High Low Tide Composites (HLTC_25) product is composed of two mosaics, distinguished by tide height, representing a composite image of the synthetic geomedian surface reflectance from Landsats 5 TM, Landsat 7 ETM+ and Landsat 8 OLI NBAR data (Li et al., 2012; Roberts et al., 2017). Oregon State Tidal Prediction (OTPS) software (Egbert and Erofeeva, 2002, 2010) was used to generate tide heights, relative to mean sea level, for the Australian continental coastline, split into 306 distinct tidal regions.
These time and date stamped tidal values were then attributed to all coastal tile observations for their time of acquisition, creating a range of observed tide heights for the Australian coastline. The two mosaics in HLTC_25 are composited from the highest and lowest 20 % of observed tide in the ensemble and are termed HOT and LOT respectively.
A geomedian composite for each Landsat band is calculated from the tiles in each ensemble subset to produce the respective HOT and LOT composites. Note that Landsat 7 ETM+ observations are excluded after May 2003 due to a large number of data artifacts.
The time range used for composite generation in each of the 306 polygons of the mosaics are tailored to ensure dynamic coastal features are captured whilst still allowing a clean and cloud free composite to be generated.
The maximum epoch for which the products are calculated is between 1995-2017, although this varies due to data resolution and observation quality. The product also includes a count of clear observations per pixel for both mosaics and attribute summaries per polygon that include the date range, the highest and lowest modeled astronomical tide as well as the highest and lowest observed tide for that time range, the total observation count and the maximum count of observations for any one pixel in the polygon, the polygon ID number (from 1 to 306), the polygon centroid in longitude and latitude and the count of tide stages attributed to every observation used in that polygon of the mosaic. For the count of tidal stage observations, e = ebbing tide, f = flowing tide, ph = peak high tide and pl = peak low tide.
The tide stages were calculated by comparison to the modeled tide data for 15 minutes either side of the observation to determine the ebb, flow or peak movement of the tide.
Observations are filtered to remove poor quality observations including cloud, cloud shadow and band saturation (of any band).
For service status information, see https://status.dea.ga.gov.au""",
# Included as a keyword for the layer
"type": "Tidal Composite",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "high_tide_composite",
# The Datacube name for the associated data product
"product_name": "high_tide_comp_20p",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 0.30]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 0.30]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (
data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "Low Tide",
"abstract": """
High Tide and Low Tide Composites 2.0.0
The High and Low Tide Composites product is composed of two surface reflectance composite mosaics of Landsat TM and ETM+ (Landsat 5 and Landsat 7 respectively) and OLI (Landsat 8) surface reflectance data (Li et al., 2012). These products have been produced using Digital Earth Australia (DEA).
The two mosaics allow cloud free and noise reduced visualisation of the shallow water and inter-tidal coastal regions of Australia, as observed at high and low tide respectively (Sagar et al. 2018).
The composites are generated utilising the geomedian approach of Roberts et al (2017) to ensure a valid surface reflectance spectra suitable for uses such as habitat mapping.
The time range used for composite generation in each polygon of the mosaic is tailored to ensure dynamic coastal features are captured whilst still allowing a clean and cloud free composite to be generated. The concepts of the Observed Tidal Range (OTR), and Highest and Lowest Observed Tide (HOT, LOT) are discussed and described fully in Sagar et al. (2017) and the product description for the ITEM v 1.0 product (Geoscience Australia, 2016).
*Overview*
Inter-tidal zones are difficult regions to characterise due to the dynamic nature of the tide. They are highly changeable environments, subject to forcings from the land, sea and atmosphere and yet they form critical habitats for a wide range of organisms from birds to fish and sea grass.
By harnessing the long archive of satellite imagery over Australia's coastal zones in the DEA and pairing the images with regional tidal modelling, the archive can be sorted by tide height rather than date, enabling the inter-tidal zone to be viewed at any stage of the tide regime.
The High Low Tide Composites (HLTC_25) product is composed of two mosaics, distinguished by tide height, representing a composite image of the synthetic geomedian surface reflectance from Landsats 5 TM, Landsat 7 ETM+ and Landsat 8 OLI NBAR data (Li et al., 2012; Roberts et al., 2017). Oregon State Tidal Prediction (OTPS) software (Egbert and Erofeeva, 2002, 2010) was used to generate tide heights, relative to mean sea level, for the Australian continental coastline, split into 306 distinct tidal regions.
These time and date stamped tidal values were then attributed to all coastal tile observations for their time of acquisition, creating a range of observed tide heights for the Australian coastline. The two mosaics in HLTC_25 are composited from the highest and lowest 20 % of observed tide in the ensemble and are termed HOT and LOT respectively.
A geomedian composite for each Landsat band is calculated from the tiles in each ensemble subset to produce the respective HOT and LOT composites. Note that Landsat 7 ETM+ observations are excluded after May 2003 due to a large number of data artifacts.
The time range used for composite generation in each of the 306 polygons of the mosaics are tailored to ensure dynamic coastal features are captured whilst still allowing a clean and cloud free composite to be generated.
The maximum epoch for which the products are calculated is between 1995-2017, although this varies due to data resolution and observation quality. The product also includes a count of clear observations per pixel for both mosaics and attribute summaries per polygon that include the date range, the highest and lowest modeled astronomical tide as well as the highest and lowest observed tide for that time range, the total observation count and the maximum count of observations for any one pixel in the polygon, the polygon ID number (from 1 to 306), the polygon centroid in longitude and latitude and the count of tide stages attributed to every observation used in that polygon of the mosaic. For the count of tidal stage observations, e = ebbing tide, f = flowing tide, ph = peak high tide and pl = peak low tide.
The tide stages were calculated by comparison to the modeled tide data for 15 minutes either side of the observation to determine the ebb, flow or peak movement of the tide.
Observations are filtered to remove poor quality observations including cloud, cloud shadow and band saturation (of any band).
For service status information, see https://status.dea.ga.gov.au""",
# Included as a keyword for the layer
"type": "Tidal Composite",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "low_tide_composite",
# The Datacube name for the associated data product
"product_name": "low_tide_comp_20p",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ndvi", "ndwi"]
},
"wcs_default_bands": ["red", "green", "blue"],
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 0.30]
},
{
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 0.30]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 0.1,
"color": "#A35F18"
},
{
"value": 0.2,
"color": "#B88512"
},
{
"value": 0.3,
"color": "#CEAC0E"
},
{
"value": 0.4,
"color": "#E5D609"
},
{
"value": 0.5,
"color": "#FFFF0C"
},
{
"value": 0.6,
"color": "#C3DE09"
},
{
"value": 0.7,
"color": "#88B808"
},
{
"value": 0.8,
"color": "#529400"
},
{
"value": 0.9,
"color": "#237100"
},
{
"value": 1.0,
"color": "#114D04"
}
]
},
{
"name": "ndwi",
"title": "NDWI - Green, SWIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"index_function": lambda data: (data["green"] - data["swir1"]) / (
data["swir1"] + data["green"]),
"needed_bands": ["green", "swir1"],
"color_ramp": [
{
"value": -0.0,
"color": "#8F3F20",
"alpha": 0.0
},
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 1.0
},
{
"value": 1.0,
"color": "#0303FF",
},
]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "ITEM",
"title": "Intertidal Extents Model",
"abstract": "The Intertidal Extents Model (ITEM) product is a national dataset of the exposed intertidal zone; "
"the land between the observed highest and lowest tide. ITEM provides the extent and topography of "
"the intertidal zone of Australia's coastline (excluding off-shore Territories). "
"This information was collated using observations in the Landsat archive since 1986. "
"ITEM can be a valuable complimentary dataset to both onshore LiDAR survey data and coarser offshore "
"bathymetry data, enabling a more realistic representation of the land and ocean interface.",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Relative Layer",
"abstract": """
The Intertidal Extents Model (ITEM v2.0) product analyses GA’s historic archive of satellite imagery to derive a model of the spatial extents of the intertidal zone throughout the tidal cycle. The model can assist in understanding the relative elevation profile of the intertidal zone,
delineating exposed areas at differing tidal heights and stages.
The product differs from previous methods used to map the intertidal zone which have been predominately focused on analysing a small number of individual satellite images per location (e.g Ryu et al., 2002; Murray et al., 2012).
By utilising a full 30 year time series of observations and a global tidal model (Egbert and Erofeeva, 2002), the methodology enables us to overcome the requirement for clear, high quality observations acquired concurrent to the time of high and low tide.
*Accuracy and limitations*
Due to the sun-synchronous nature of the various Landsat sensor observations; it is unlikely that the full physical extents of the tidal range in any cell will be observed. Hence, terminology has been adopted for the product to reflect the highest modelled tide observed in a given cell (HOT) and the lowest modelled tide observed (LOT) (see Sagar et al. 2017). These measures are relative to Mean Sea Level, and have no consistent relationship to Lowest (LAT) and Highest Astronomical Tide (HAT).
The inclusion of the lowest (LMT) and highest (HMT) modelled tide values for each tidal polygon indicates the highest and lowest tides modelled for that location across the full time series by the OTPS model. The relative difference between the LOT and LMT (and HOT and HMT) heights gives an indication of the extent of the tidal range represented in the Relative Extents Model.
As in ITEM v1.0, v2.0 contains some false positive land detection in open ocean regions. These are a function of the lack of data at the extremes of the observed tidal range, and features like glint and undetected cloud in these data poor regions/intervals. Methods to isolate and remove these features are in development for future versions. Issues in the DEA archive and data noise in the Esperance, WA region off Cape Le Grande and Cape Arid (Polygons 236,201,301) has resulted in significant artefacts in the model, and use of the model in this area is not recommended.
The Confidence layer is designed to assess the reliability of the Relative Extent Model. Within each tidal range percentile interval, the pixel-based standard deviation of the NDWI values for all observations in the interval subset is calculated. The average standard deviation across all tidal range intervals is then calculated and retained as a quality indicator in this product layer.
The Confidence Layer reflects the pixel based consistency of the NDWI values within each subset of observations, based on the tidal range. Higher standard deviation values indicate water classification changes not based on the tidal cycle, and hence lower confidence in the extent model.
Possible drivers of these changes include:
Inadequacies of the tidal model, due perhaps to complex coastal bathymetry or estuarine structures not captured in the model. These effects have been reduced in ITEM v2.0 compared to previous versions, through the use of an improved tidal modelling framework.
Change in the structure and exposure of water/non-water features NOT driven by tidal variation.
For example, movement of sand banks in estuaries, construction of man-made features (ports etc.). Terrestrial/Inland water features not influenced by the tidal cycle.
File naming:
THE RELATIVE EXTENTS MODEL v2.0
ITEM_REL_<TIDAL POLYGON NUMBER>_<LONGITUDE>_<LATITUDE>
TIDAL POLYGON NUMBER relates to the id of the tidal polygon referenced by the file
LONGITUDE is the longitude of the centroid of the tidal polygon
LATITUDE is the latitude of the centroid of the tidal polygon
THE CONFIDENCE LAYER v2.0
ITEM_STD_<TIDAL POLYGON NUMBER>_<LONGITUDE>_<LATITUDE>
TIDAL POLYGON NUMBER relates to the id of the tidal polygon referenced by the file
LONGITUDE is the longitude of the centroid of the tidal polygon
LATITUDE is the latitude of the centroid of the tidal polygon
*Overview*
The Intertidal Extents Model product is a national scale gridded dataset characterising the spatial extents of the exposed intertidal zone, at intervals of the observed tidal range (Sagar et al. 2017). The current version (2.0) utilises all Landsat observations (5, 7, and 8) for Australian coastal regions (excluding off-shore Territories) between 1986 and 2016 (inclusive).
ITEM v2.0 has implemented an improved tidal modelling framework (see Sagar et al. 2018) over that utilised in ITEM v1.0. The expanded Landsat archive within the Digital Earth Australia (DEA) has also enabled the model extent to be increased to cover a number of offshore reefs, including the full Great Barrier Reef and southern sections of the Torres Strait Islands.
The DEA archive and new tidal modelling framework has improved the coverage and quality of the ITEM v2.0 relative extents model, particularly in regions where AGDC cell boundaries in ITEM v1.0 produced discontinuities or the imposed v1.0 cell structure resulted in poor quality tidal modelling (see Sagar et al. 2017).
For service status information, see https://status.dea.ga.gov.au""",
# Included as a keyword for the layer
"type": "ITEM v2.0.0",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "ITEM_V2.0.0",
# The Datacube name for the associated data product
"product_name": "item_v2",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["relative"],
"apply_solar_corrections": False,
"legend": {
"url": "https://data.dea.ga.gov.au/item_v2/v2.0.1/relative/ITEM_REL_Legend.png"
},
"wcs_default_bands": ["relative"],
"styles": [
{
"name": "relative_layer",
"title": "relative layer",
"abstract": "The Relative Extents Model (item_v2) 25m v2.0.0",
"needed_bands": ["relative"],
"color_ramp": [
{
'value': 0.0,
'color': '#000000',
'alpha': 0.0
},
{
'value': 1.0,
'color': '#d7191c',
'alpha': 1.0
},
{
'value': 2.0,
'color': '#ec6e43',
},
{
'value': 3.0,
'color': '#fdb96e',
},
{
'value': 4.0,
'color': '#fee7a4',
},
{
'value': 5.0,
'color': '#e7f5b7',
},
{
'value': 6.0,
'color': '#b7e1a7',
},
{
'value': 7.0,
'color': '#74b6ad',
},
{
'value': 8.0,
'color': '#2b83ba'
},
{
'value': 9.0,
'color': '#000000',
'alpha': 0.0
},
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 10.0,
"major_ticks": 1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "relative_layer",
},
{
# Included as a keyword for the layer
"label": "Confidence Layer",
"abstract": """
The Intertidal Extents Model (ITEM v2.0) product analyses GA’s historic archive of satellite imagery to derive a model of the spatial extents of the intertidal zone throughout the tidal cycle. The model can assist in understanding the relative elevation profile of the intertidal zone,
delineating exposed areas at differing tidal heights and stages.
The product differs from previous methods used to map the intertidal zone which have been predominately focused on analysing a small number of individual satellite images per location (e.g Ryu et al., 2002; Murray et al., 2012).
By utilising a full 30 year time series of observations and a global tidal model (Egbert and Erofeeva, 2002), the methodology enables us to overcome the requirement for clear, high quality observations acquired concurrent to the time of high and low tide.
*Accuracy and limitations*
Due to the sun-synchronous nature of the various Landsat sensor observations; it is unlikely that the full physical extents of the tidal range in any cell will be observed. Hence, terminology has been adopted for the product to reflect the highest modelled tide observed in a given cell (HOT) and the lowest modelled tide observed (LOT) (see Sagar et al. 2017). These measures are relative to Mean Sea Level, and have no consistent relationship to Lowest (LAT) and Highest Astronomical Tide (HAT).
The inclusion of the lowest (LMT) and highest (HMT) modelled tide values for each tidal polygon indicates the highest and lowest tides modelled for that location across the full time series by the OTPS model. The relative difference between the LOT and LMT (and HOT and HMT) heights gives an indication of the extent of the tidal range represented in the Relative Extents Model.
As in ITEM v1.0, v2.0 contains some false positive land detection in open ocean regions. These are a function of the lack of data at the extremes of the observed tidal range, and features like glint and undetected cloud in these data poor regions/intervals. Methods to isolate and remove these features are in development for future versions. Issues in the DEA archive and data noise in the Esperance, WA region off Cape Le Grande and Cape Arid (Polygons 236,201,301) has resulted in significant artefacts in the model, and use of the model in this area is not recommended.
The Confidence layer is designed to assess the reliability of the Relative Extent Model. Within each tidal range percentile interval, the pixel-based standard deviation of the NDWI values for all observations in the interval subset is calculated. The average standard deviation across all tidal range intervals is then calculated and retained as a quality indicator in this product layer.
The Confidence Layer reflects the pixel based consistency of the NDWI values within each subset of observations, based on the tidal range. Higher standard deviation values indicate water classification changes not based on the tidal cycle, and hence lower confidence in the extent model.
Possible drivers of these changes include:
Inadequacies of the tidal model, due perhaps to complex coastal bathymetry or estuarine structures not captured in the model. These effects have been reduced in ITEM v2.0 compared to previous versions, through the use of an improved tidal modelling framework.
Change in the structure and exposure of water/non-water features NOT driven by tidal variation.
For example, movement of sand banks in estuaries, construction of man-made features (ports etc.). Terrestrial/Inland water features not influenced by the tidal cycle.
File naming:
THE RELATIVE EXTENTS MODEL v2.0
ITEM_REL_<TIDAL POLYGON NUMBER>_<LONGITUDE>_<LATITUDE>
TIDAL POLYGON NUMBER relates to the id of the tidal polygon referenced by the file
LONGITUDE is the longitude of the centroid of the tidal polygon
LATITUDE is the latitude of the centroid of the tidal polygon
THE CONFIDENCE LAYER v2.0
ITEM_STD_<TIDAL POLYGON NUMBER>_<LONGITUDE>_<LATITUDE>
TIDAL POLYGON NUMBER relates to the id of the tidal polygon referenced by the file
LONGITUDE is the longitude of the centroid of the tidal polygon
LATITUDE is the latitude of the centroid of the tidal polygon
*Overview*
The Intertidal Extents Model product is a national scale gridded dataset characterising the spatial extents of the exposed intertidal zone, at intervals of the observed tidal range (Sagar et al. 2017). The current version (2.0) utilises all Landsat observations (5, 7, and 8) for Australian coastal regions (excluding off-shore Territories) between 1986 and 2016 (inclusive).
ITEM v2.0 has implemented an improved tidal modelling framework (see Sagar et al. 2018) over that utilised in ITEM v1.0. The expanded Landsat archive within the Digital Earth Australia (DEA) has also enabled the model extent to be increased to cover a number of offshore reefs, including the full Great Barrier Reef and southern sections of the Torres Strait Islands.
The DEA archive and new tidal modelling framework has improved the coverage and quality of the ITEM v2.0 relative extents model, particularly in regions where AGDC cell boundaries in ITEM v1.0 produced discontinuities or the imposed v1.0 cell structure resulted in poor quality tidal modelling (see Sagar et al. 2017).
For service status information, see https://status.dea.ga.gov.au""",
# Included as a keyword for the layer
"type": "ITEM v2.0.0",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "ITEM_V2.0.0_Conf",
# The Datacube name for the associated data product
"product_name": "item_v2_conf",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"fuse_func": "datacube_wms.wms_utils.item_fuser",
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["stddev"],
"apply_solar_corrections": False,
"legend": {
"styles": ["confidence_layer"]
},
"wcs_default_bands": ["stddev"],
"styles": [
{
"name": "confidence_layer",
"title": "confidence layer",
"abstract": "The Confidence layer (item_v2_conf) 25m v2.0.0",
"needed_bands": ["stddev"],
"color_ramp": [
{
'value': 0.0,
'color': '#2b83ba',
'alpha': 0.0
},
{
'value': 0.01,
'color': '#2b83ba',
'legend': {
"prefix": "<"
}
},
{
'value': 0.055,
'color': '#55a1b2',
},
{
'value': 0.1,
'color': '#80bfab',
},
{
'value': 0.145,
'color': '#abdda4',
},
{
'value': 0.19,
'color': '#c7e8ad',
},
{
'value': 0.235,
'color': '#e3f3b6',
},
{
'value': 0.28,
'color': '#fdbf6f',
},
{
'value': 0.325,
'color': '#e37d1c',
},
{
'value': 0.37,
'color': '#e35e1c',
},
{
'value': 0.415,
'color': '#e31a1c',
},
{
'value': 0.46,
'color': '#e31a1c',
},
{
'value': 0.505,
'color': '#e31a1c',
},
{
'value': 0.55,
'color': '#e31a1c',
'legend': {
"prefix": ">"
}
},
],
"legend": {
"units": "NDWI standard deviation"
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "confidence_layer",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "water_bodies",
"title": "Projects",
"abstract": "Projects",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Water Bodies",
"abstract": "NSW Water Bodies Project"
"For service status information, see https://status.dea.ga.gov.au",
# Included as a keyword for the layer
"type": "NSW",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "water_bodies",
# The Datacube name for the associated data product
"product_name": "water_bodies",
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != 65535,
# include links to csv, {dam_id: 2611} becomes ".../026/02611.csv"
"feature_info_include_custom": lambda data: {
'timeseries': f"https://data.dea.ga.gov.au"
f"/projects/WaterBodies/feature_info/"
f"{data['dam_id'] // 100:03}/{data['dam_id']:05}.csv"
},
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["dam_id"],
"apply_solar_corrections": False,
"legend": {
"styles": []
},
"wcs_default_bands": ["dam_id"],
"styles": [
{
"name": "dam_id",
"title": "Water Body",
"abstract": "",
"needed_bands": ["dam_id"],
"color_ramp": [
{
'value': 0,
'color': '#11ccff',
'alpha': 1.0
},
{
'value': 65534,
'color': '#11ccff',
'alpha': 1.0
},
],
"legend": {
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "dam_id",
},
{
# Included as a keyword for the layer
"label": "HAP",
# Included as a keyword for the layer
"type": "historical airborne photography",
# Included as a keyword for the layer
"variant": "munged",
# The WMS name for the layer
"name": "historical_airborne_photography",
# The Datacube name for the associated data product
"product_name": "historical_airborne_photography",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# Min zoom factor (above) works well for small-tiled requests, (e.g. 256x256 as sent by Terria).
# However, for large-tiled requests (e.g. as sent by QGIS), large and intensive queries can still
# go through to the datacube.
# max_datasets_wms specifies a maximum number of datasets that a GetMap request can retrieve.
# Indicative polygons are displayed if a request exceeds the limits imposed by EITHER max_datasets_wms
# OR min_zoom_factor.
# max_datasets_wms should be set in conjunction with min_zoom_factor so that Terria style 256x256
# tiled requests respond consistently - you never want to see a mixture of photographic tiles and polygon
# tiles at a given zoom level. i.e. max_datasets_wms should be greater than the number of datasets
# required for most intensive possible photographic query given the min_zoom_factor.
# Note that the ideal value may vary from product to product depending on the size of the dataset
# extents for the product.
# Defaults to zero, which is interpreted as no dataset limit.
# 6 seems to work with a min_zoom_factor of 500.0 for "old-style" Net-CDF albers tiled data.
"max_datasets_wms": 6,
# max_datasets_wcs is the WCS equivalent of max_datasets_wms. The main requirement for setting this
# value is to avoid gateway timeouts on overly large WCS requests (and reduce server load).
"max_datasets_wcs": 16,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
'wcs_default_bands':['Band_1'],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
# LS7: http://www.indexdatabase.de/db/s-single.php?id=8
# LS8: http://www.indexdatabase.de/db/s-single.php?id=168
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_gray",
"title": "Simple gray",
"abstract": "Simple grayscale image",
"components": {
"red": {
"Band_1": 1.0
},
"green": {
"Band_1": 1.0
},
"blue": {
"Band_1": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 255]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_gray",
}
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "National ASTER Map",
"title": "National ASTER Map of Australia",
"abstract": """
This dataset comprises a set of 14+ geoscience products made up of mosaiced ASTER scenes across Australia.
The individual geoscience products are a combination of bands and band ratios to highlight different mineral groups and parameters including:
False colour composite
CSIRO Landsat TM Regolith Ratios
Green vegetation content
Ferric oxide content
Ferric oxide composition
Ferrous iron index
Opaque index
AlOH group content
AlOH group composition
Kaolin group index
FeOH group content
MgOH group content
MgOH group composition
Ferrous iron content in MgOH/carbonate""",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "False Colour Mosaic",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
False colour RGB composite
Red: B3
Green: B2
Blue: B1
(red = green vegetation)
Use this image to help understand non-geological differences within and between ASTER scenes caused by green vegetation (red), fire scars, thin and thick cloud and cloud shadows.
Use band 2 only for a gray-scale background to the content, composition and index colour products.""",
# The WMS name for the layer
"name": "aster_false_colour",
# The Datacube name for the associated data product
"product_name": "aster_false_colour",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"wcs_default_bands": ["Band_1", "Band_2", "Band_3"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "false_colour",
"title": "False Colour",
"abstract": "Simple false-colour image using ASTER Bands 3 as red, 2 as green and 1 as blue",
"components": {
"red": {
"Band_1": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_3": 1.0
}
},
"scale_range": [0.0, 255.0]
},
{
"name": "gray",
"title": "B2 Grayscale",
"abstract": "Simple grayscale image using ASTER Band 2",
"components": {
"red": {
"Band_2": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_2": 1.0
}
},
"scale_range": [0.0, 255.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "false_colour",
}, # ASTER False Colour
{
# ASTER Regolith Ratios layer definition.
# Included as a keyword for the layer
"label": "Regolith Ratios",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
3 band RGB composite
Red: B3/B2
Green: B3/B7
Blue: B4/B7
(white = green vegetation)
Use this image to help interpret:
(1) the amount of green vegetation cover (appears as white);
(2) basic spectral separation (colour) between different regolith and geological units and regions/provinces; and
(3) evidence for unmasked cloud (appears as green).""",
# The WMS name for the layer
"name": "aster_regolith_ratios",
# The Datacube name for the associated data product
"product_name": "aster_regolith_ratios",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1", "Band_2", "Band_3"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"Band_1": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_3": 1.0
}
},
"scale_range": [0.0, 255.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}, # ASTER Regolith Ratios
{
# ASTER AlOH Group Composition layer definition.
# Included as a keyword for the layer
"label": "AlOH Group Composition",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
# NOTE: typos fixed in the user-facing abstract ("warmer colours",
# "parent rocks", "propylitic").
"abstract": """
Band ratio: B5/B7
Blue is well ordered kaolinite, Al-rich muscovite/illite, paragonite, pyrophyllite
Red is Al-poor (Si-rich) muscovite (phengite)
Useful for mapping:
(1) exposed saprolite/saprock is often white mica or Al-smectite (warmer colours) whereas transported materials are often kaolin-rich (cooler colours);
(2) clays developed over carbonates, especially Al-smectite (montmorillonite, beidellite) will produce middle to warmer colours;
(3) stratigraphic mapping based on different clay-types; and
(4) lithology-overprinting hydrothermal alteration, e.g. Si-rich and K-rich phengitic mica (warmer colours).
Combine with Ferrous iron in MgOH and FeOH content products to look for evidence of overlapping/juxtaposed potassic metasomatism in ferromagnesian parent rocks (e.g. Archaean greenstone associated Au mineralisation) +/- associated distal propylitic alteration (e.g. chlorite, amphibole).""",
# The WMS name for the layer
"name": "aster_aloh_group_composition",
# The Datacube name for the associated data product
"product_name": "aster_aloh_group_composition",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B5/B7 ",
"abstract": "",
# Band_1 holds the pre-computed B5/B7 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.9"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.3"
}
}
],
"legend": {
"units": "Blue is well ordered kaolinite,\nRed is Al-poor (Si-rich) muscovite (phengite)",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER AlOH Group Composition
{
# ASTER AlOH Group Content layer definition.
# Included as a keyword for the layer
"label": "AlOH Group Content",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: (B5+B7)/B6
Blue is low abundance
Red is high abundance
(potentially includes: phengite, muscovite, paragonite, lepidolite, illite, brammalite, montmorillonite, beidellite, kaolinite, dickite)
Useful for mapping:
(1) exposed saprolite/saprock;
(2) clay-rich stratigraphic horizons;
(3) lithology-overprinting hydrothermal phyllic (e.g. white mica) alteration; and
(4) clay-rich diluents in ore systems (e.g. clay in iron ore).
Also combine with AlOH composition to help map:
(1) exposed in situ parent material persisting through “cover” which can be expressed as:
(a) more abundant AlOH content + (b) long-wavelength (warmer colour) AlOH composition (e.g. muscovite/phengite).""",
# The WMS name for the layer
"name": "aster_aloh_group_content",
# The Datacube name for the associated data product
"product_name": "aster_aloh_group_content",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "(B5+B7)/B6 ",
"abstract": "",
# Band_1 holds the pre-computed (B5+B7)/B6 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "2.0"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "2.25"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER AlOH Group Content
{
# ASTER FeOH Group Content layer definition.
# Included as a keyword for the layer
"label": "FeOH Group Content",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
# NOTE: typos fixed in the user-facing abstract (closed the unbalanced
# parenthesis after "opal-chalcedony"; "propylitic").
"abstract": """
Band ratio: (B6+B8)/B7
Blue is low content,
Red is high content
(potentially includes: chlorite, epidote, jarosite, nontronite, gibbsite, gypsum, opal-chalcedony)
Useful for mapping:
(1) jarosite (acid conditions) – in combination with ferric oxide content (high);
(2) gypsum/gibbsite – in combination with ferric oxide content (low);
(3) magnesite - in combination with ferric oxide content (low) and MgOH content (moderate-high);
(4) chlorite (e.g. propylitic alteration) – in combination with Ferrous in MgOH (high); and
(5) epidote (calc-silicate alteration) – in combination with Ferrous in MgOH (low).""",
# The WMS name for the layer
"name": "aster_feoh_group_content",
# The Datacube name for the associated data product
"product_name": "aster_feoh_group_content",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "(B6+B8)/B7 ",
"abstract": "",
# Band_1 holds the pre-computed (B6+B8)/B7 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "2.03"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "2.25"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER FeOH Group Content
{
# ASTER Ferric Oxide Composition layer definition.
# Included as a keyword for the layer
"label": "Ferric Oxide Composition",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
# NOTE: typos fixed in the user-facing abstract ("goethite",
# "hematite-rich").
"abstract": """
Band ratio: B2/B1
Blue-cyan is goethite rich,
Green is hematite-goethite,
Red-yellow is hematite-rich
Useful For:
(1) Mapping transported materials (including palaeochannels) characterised by hematite (relative to goethite). Combine with AlOH composition to find co-located areas of hematite and poorly ordered kaolin to map transported materials; and
(2) hematite-rich areas in drier conditions (eg above the water table) whereas goethite-rich in wetter conditions (eg at/below the water or areas recently exposed). May also be climate driven.""",
# The WMS name for the layer
"name": "aster_ferric_oxide_composition",
# The Datacube name for the associated data product
"product_name": "aster_ferric_oxide_composition",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B2/B1 ",
"abstract": "",
# Band_1 holds the pre-computed B2/B1 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.5"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "3.3"
}
}
],
"legend": {
"units": "Blue-cyan is non-hematite,\nRed-yellow is hematite-rich",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Ferric Oxide Composition
{
# ASTER Ferric Oxide Content layer definition.
# Included as a keyword for the layer
"label": "Ferric Oxide Content",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
# NOTE: typos fixed in the user-facing abstract (missing space after
# "(hematite-goethite).", duplicated "mapping", "iron oxide").
"abstract": """
Band ratio: B4/B3
Blue is low abundance,
Red is high abundance
Useful for:
(1) Exposed iron ore (hematite-goethite). Use in combination with the “Opaques index” to help separate/map dark (a) surface lags (e.g. maghemite gravels) which can be misidentified in visible and false colour imagery; and (b) magnetite in BIF and/or bedded iron ore; and
(2) Acid conditions: combine with FeOH Group content to help map jarosite which will have high values in both products.
Mapping hematite versus goethite is NOT easily achieved as ASTER’s spectral bands were not designed to capture diagnostic iron oxide spectral behaviour.
However, some information on visible colour relating in part to differences in hematite and/or goethite content can be obtained using a ratio of B2/B1 especially when this is masked using a B4/B3 to locate those pixels with sufficient iron oxide content.
""",
# The WMS name for the layer
"name": "aster_ferric_oxide_content",
# The Datacube name for the associated data product
"product_name": "aster_ferric_oxide_content",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B4/B3 ",
"abstract": "",
# Band_1 holds the pre-computed B4/B3 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.1"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "2.1"
}
}
],
"legend": {
"units": "Blue is low abundance,\nRed is high abundance",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Ferric Oxide Content
{
# ASTER Ferrous Iron Content in MgOH layer definition.
# Included as a keyword for the layer
"label": "Ferrous Iron Content in MgOH",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B5/B4
Blue is low ferrous iron content in carbonate and MgOH minerals like talc and tremolite.
Red is high ferrous iron content in carbonate and MgOH minerals like chlorite and actinolite.
Useful for mapping:
(1) un-oxidised “parent rocks” – i.e. mapping exposed parent rock materials (warm colours) in transported cover;
(2) talc/tremolite (Mg-rich – cool colours) versus actinolite (Fe-rich – warm colours);
(3) ferrous-bearing carbonates (warm colours) potentially associated with metasomatic “alteration”;
(4) calcite/dolomite which are ferrous iron-poor (cool colours); and
(5) epidote, which is ferrous iron poor (cool colours) – in combination with FeOH content product (high).""",
# The WMS name for the layer
"name": "aster_ferrous_iron_content_in_mgoh",
# The Datacube name for the associated data product
"product_name": "aster_ferrous_iron_content_in_mgoh",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B5/B4 ",
"abstract": "",
# Band_1 holds the pre-computed B5/B4 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.1"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "2.0"
}
}
],
"legend": {
"units": "Blue is low ferrous iron content,\nRed is high ferrous iron content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Ferrous Iron Content in MgOH
{
# ASTER Ferrous Iron Index layer definition.
# Included as a keyword for the layer
"label": "Ferrous Iron Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B5/B4
Blue is low abundance,
Red is high abundance
This product can help map exposed “fresh” (un-oxidised) rocks (warm colours) especially mafic and ultramafic lithologies rich in ferrous silicates (e.g. actinolite, chlorite) and/or ferrous carbonates (e.g. ferroan dolomite, ankerite, siderite).
Applying an MgOH Group content mask to this product helps to isolate ferrous bearing non-OH bearing minerals like pyroxenes (e.g. jadeite) from OH-bearing or carbonate-bearing ferrous minerals like actinolite or ankerite, respectively.
Also combine with the FeOH Group content product to find evidence for ferrous-bearing chlorite (e.g. chamosite).
""",
# The WMS name for the layer
"name": "aster_ferrous_iron_index",
# The Datacube name for the associated data product
"product_name": "aster_ferrous_iron_index",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B5/B4 ",
"abstract": "",
# Band_1 holds the pre-computed B5/B4 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.75"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.025"
}
}
],
"legend": {
"units": "Blue is low abundance,\nRed is high abundance",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Ferrous Iron Index
{
# ASTER Green Vegetation layer definition.
# Included as a keyword for the layer
"label": "Green Vegetation",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B3/B2
Blue is low content,
Red is high content
Use this image to help interpret the amount of “obscuring/complicating” green vegetation cover.""",
# The WMS name for the layer
"name": "aster_green_vegetation",
# The Datacube name for the associated data product
"product_name": "aster_green_vegetation",
# The Datacube name for the associated pixel-quality product (optional)
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
# Default bands used for WCS requests that do not specify bands.
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
"styles": [
# Colour-ramp style: maps the single pre-computed ratio band onto a palette.
{
"name": "ramp",
"title": "B3/B2 ",
"abstract": "",
# Band_1 holds the pre-computed B3/B2 ratio scaled to 0-255;
# the legend labels below give the corresponding physical range.
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.4"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "4"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Green Vegetation
{
# Included as a keyword for the layer
"label": "Gypsum Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band Ratio: (B10+B12)/B11
Blue is low gypsum content,
Red is high gypsum content
Useful for mapping:
(1) evaporative environments (e.g. salt lakes) and associated arid aeolian systems (e.g. dunes);
(2) acid waters (e.g. from oxidising sulphides) invading carbonate rich materials including around mine environments; and
(3) hydrothermal (e.g. volcanic) systems.""",
# The WMS name for the layer
"name": "aster_gypsum_index",
# The Datacube name for the associated data product
"product_name": "aster_gypsum_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "(B10+B12)/B11 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.47"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "0.5"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Gypsum Index
{
# Included as a keyword for the layer
"label": "Kaolin Group Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band Ratio: B6/B5
Blue is low content,
Red is high content
(potentially includes: pyrophyllite, alunite, well-ordered kaolinite)
Useful for mapping:
(1) different clay-type stratigraphic horizons;
(2) lithology-overprinting hydrothermal alteration, e.g. high sulphidation, “advanced argillic” alteration comprising pyrophyllite, alunite, kaolinite/dickite; and
(3) well-ordered kaolinite (warmer colours) versus poorly-ordered kaolinite (cooler colours) which can be used for mapping in situ versus transported materials, respectively.""",
# The WMS name for the layer
"name": "aster_kaolin_group_index",
# The Datacube name for the associated data product
"product_name": "aster_kaolin_group_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B6/B5 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.0"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.125"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Kaolin Group Index
{
# Included as a keyword for the layer
"label": "MgOH Group Composition",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B7/B8
Blue-cyan is magnesite-dolomite, amphibole, chlorite
Red is calcite, epidote, amphibole
Useful for mapping:
(1) exposed saprolite/saprock is often white mica or Al-smectite (warmer colours) whereas transported materials are often kaolin-rich (cooler colours);
(2) clays developed over carbonates, especially Al-smectite (montmorillonite, beidellite) will produce middle to warmers colours.
(3) stratigraphic mapping based on different clay-types; and
(4) lithology-overprinting hydrothermal alteration, e.g. Si-rich and K-rich phengitic mica (warmer colours).
Combine with Ferrous iron in MgOH and FeOH content products to look for evidence of overlapping/juxtaposed potassic metasomatism in ferromagnesian parents rocks (e.g. Archaean greenstone associated Au mineralisation) +/- associated distal propyllitic alteration (e.g. chlorite, amphibole).""",
# The WMS name for the layer
"name": "aster_mgoh_group_composition",
# The Datacube name for the associated data product
"product_name": "aster_mgoh_group_composition",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B7/B8 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.6"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.4"
}
}
],
"legend": {
"units": "Blue-cyan is magnesite-dolomite, amphibole, \nRed is calcite, epidote, amphibole",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER MgOH Group Composition
{
# Included as a keyword for the layer
"label": "MgOH Group Content",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: (B6+B9/(B7+B8)
Blue is low content,
Red is high content
(potentially includes: calcite, dolomite, magnesite, chlorite, epidote, amphibole, talc, serpentine)
Useful for mapping:
(1) “hydrated” ferromagnesian rocks rich in OH-bearing tri-octahedral silicates like actinolite, serpentine, chlorite and talc;
(2) carbonate-rich rocks, including shelf (palaeo-reef) and valley carbonates(calcretes, dolocretes and magnecretes); and
(3) lithology-overprinting hydrothermal alteration, e.g. “propyllitic alteration” comprising chlorite, amphibole and carbonate.
The nature (composition) of the silicate or carbonate mineral can be further assessed using the MgOH composition product.""",
# The WMS name for the layer
"name": "aster_mgoh_group_content",
# The Datacube name for the associated data product
"product_name": "aster_mgoh_group_content",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "(B6+B9/(B7+B8) ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.05"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.2"
}
}
],
"legend": {
"units": "Blue low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER MgOH Group Content
{
# Included as a keyword for the layer
"label": "Opaque Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B1/B4
Blue is low abundance,
Red is high abundance
(potentially includes carbon black (e.g. ash), magnetite, Mn oxides, and sulphides in unoxidised envornments
Useful for mapping:
(1) magnetite-bearing rocks (e.g. BIF);
(2) maghemite gravels;
(3) manganese oxides;
(4) graphitic shales.
Note 1: (1) and (4) above can be evidence for “reduced” rocks when interpreting REDOX gradients.
Combine with AlOH group Content (high values) and Composition (high values) products, to find evidence for any invading “oxidised” hydrothermal fluids which may have interacted with reduced rocks evident in the Opaques index product.""",
# The WMS name for the layer
"name": "aster_opaque_index",
# The Datacube name for the associated data product
"product_name": "aster_opaque_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B1/B4 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.4"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "0.9"
}
}
],
"legend": {
"units": "Blue low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Opaque Index
{
# Included as a keyword for the layer
"label": "Silica Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B13/B10
Blue is low silica content,
Red is high silica content
(potentially includes Si-rich minerals, such as quartz, feldspars, Al-clays)
Geoscience Applications:
Broadly equates to the silica content though the intensity (depth) of this reststrahlen feature is also affected by particle size <250 micron.
Useful product for mapping:
(1) colluvial/alluvial materials;
(2) silica-rich (quartz) sediments (e.g. quartzites);
(3) silification and silcretes; and
(4) quartz veins.
Use in combination with quartz index, which is often correlated with the Silica index.""",
# The WMS name for the layer
"name": "aster_silica_index",
# The Datacube name for the associated data product
"product_name": "aster_silica_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B13/B10 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.0"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.35"
}
}
],
"legend": {
"units": "Blue low silica content,\nRed is high silica content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Silica Index
{
# Included as a keyword for the layer
"label": "Quartz Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B11/(B10+B12)
Blue is low quartz content,
Red is high quartz content
Geoscience Applications:
Use in combination with Silica index to more accurately map “crystalline” quartz rather than poorly ordered silica (e.g. opal), feldspars and compacted clays.""",
# The WMS name for the layer
"name": "aster_quartz_index",
# The Datacube name for the associated data product
"product_name": "aster_quartz_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B11/(B10+B12) ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.50"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "0.52"
}
}
],
"legend": {
"units": "Blue low quartz content,\nRed is high quartz content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Quartz Index
],
}
]
| 49.240012
| 851
| 0.389774
| 38,593
| 473,295
| 4.707693
| 0.050631
| 0.010634
| 0.010898
| 0.013375
| 0.88627
| 0.879698
| 0.872185
| 0.869025
| 0.863956
| 0.858887
| 0
| 0.043988
| 0.530145
| 473,295
| 9,611
| 852
| 49.245136
| 0.772947
| 0.143838
| 0
| 0.568653
| 0
| 0.028426
| 0.352009
| 0.015313
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.000261
| 0
| 0.000261
| 0.001695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
132b44565eed82ad8e8af2920ca03d221f09b5cd
| 35,977
|
py
|
Python
|
packages/python/plotly/plotly/express/_chart_types.py
|
tr8dr/plotly.py
|
5c3c985c474f54ad039ff285086d7a83e9864592
|
[
"MIT"
] | 1
|
2021-03-30T17:17:13.000Z
|
2021-03-30T17:17:13.000Z
|
packages/python/plotly/plotly/express/_chart_types.py
|
tr8dr/plotly.py
|
5c3c985c474f54ad039ff285086d7a83e9864592
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/express/_chart_types.py
|
tr8dr/plotly.py
|
5c3c985c474f54ad039ff285086d7a83e9864592
|
[
"MIT"
] | null | null | null |
from ._core import make_figure
from ._doc import make_docstring
import plotly.graph_objs as go
_wide_mode_xy_append = [
"Either `x` or `y` can optionally be a list of column references or array_likes, ",
"in which case the data will be treated as if it were 'wide' rather than 'long'.",
]
_cartesian_append_dict = dict(x=_wide_mode_xy_append, y=_wide_mode_xy_append)
def scatter(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    symbol=None,
    size=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    size_max=None,
    marginal_x=None,
    marginal_y=None,
    trendline=None,
    trendline_color_override=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a scatter plot, each row of `data_frame` is represented by a symbol
    mark in 2D space.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots the full keyword-argument set above as the figure
    # spec — no local variable may be introduced before this call, or it would
    # leak into ``args``.
    return make_figure(args=locals(), constructor=go.Scatter)
# Overwrite the short inline docstring with the fully generated per-parameter
# documentation (including the wide-form note for `x`/`y`).
scatter.__doc__ = make_docstring(scatter, append_dict=_cartesian_append_dict)
def density_contour(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    marginal_x=None,
    marginal_y=None,
    trendline=None,
    trendline_color_override=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    histfunc=None,
    histnorm=None,
    nbinsx=None,
    nbinsy=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a density contour plot, rows of `data_frame` are grouped together
    into contour marks to visualize the 2D distribution of an aggregate
    function `histfunc` (e.g. the count or sum) of the value `z`.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(
        args=locals(),
        constructor=go.Histogram2dContour,
        trace_patch=dict(
            # "none" draws unfilled contour lines (see Histogram2dContour
            # trace reference).
            contours=dict(coloring="none"),
            # Binning/aggregation options are forwarded straight to the trace;
            # shared bingroups keep bins aligned across traces.
            histfunc=histfunc,
            histnorm=histnorm,
            nbinsx=nbinsx,
            nbinsy=nbinsy,
            xbingroup="x",
            ybingroup="y",
        ),
    )
# Overwrite the inline docstring with the generated per-parameter docs,
# adding the wide-form note for `x`/`y` and the `z`/`histfunc` cross-notes.
density_contour.__doc__ = make_docstring(
    density_contour,
    append_dict=dict(
        x=_wide_mode_xy_append,
        y=_wide_mode_xy_append,
        z=[
            "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",
        ],
        histfunc=["The arguments to this function are the values of `z`."],
    ),
)
def density_heatmap(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    marginal_x=None,
    marginal_y=None,
    opacity=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    histfunc=None,
    histnorm=None,
    nbinsx=None,
    nbinsy=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a density heatmap, rows of `data_frame` are grouped together into
    colored rectangular tiles to visualize the 2D distribution of an
    aggregate function `histfunc` (e.g. the count or sum) of the value `z`.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(
        args=locals(),
        constructor=go.Histogram2d,
        trace_patch=dict(
            # Binning/aggregation options are forwarded straight to the trace;
            # shared bingroups keep bins aligned across traces.
            histfunc=histfunc,
            histnorm=histnorm,
            nbinsx=nbinsx,
            nbinsy=nbinsy,
            xbingroup="x",
            ybingroup="y",
        ),
    )
# Overwrite the inline docstring with the generated per-parameter docs,
# adding the wide-form note for `x`/`y` and the `z`/`histfunc` cross-notes.
density_heatmap.__doc__ = make_docstring(
    density_heatmap,
    append_dict=dict(
        x=_wide_mode_xy_append,
        y=_wide_mode_xy_append,
        z=[
            "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",
        ],
        histfunc=["The arguments to this function are the values of `z`.",],
    ),
)
def line(
    data_frame=None,
    x=None,
    y=None,
    line_group=None,
    color=None,
    line_dash=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    line_shape=None,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a 2D line plot, each row of `data_frame` is represented as vertex of
    a polyline mark in 2D space.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(args=locals(), constructor=go.Scatter)
# Overwrite the inline docstring with the generated per-parameter docs
# (including the wide-form note for `x`/`y`).
line.__doc__ = make_docstring(line, append_dict=_cartesian_append_dict)
def area(
    data_frame=None,
    x=None,
    y=None,
    line_group=None,
    color=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    orientation=None,
    groupnorm=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    line_shape=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a stacked area plot, each row of `data_frame` is represented as
    vertex of a polyline mark in 2D space. The area between successive
    polylines is filled.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(
        args=locals(),
        constructor=go.Scatter,
        # A single shared stackgroup makes all traces stack; mode="lines"
        # suppresses markers for the area outline.
        trace_patch=dict(stackgroup=1, mode="lines", groupnorm=groupnorm),
    )
# Overwrite the inline docstring with the generated per-parameter docs
# (including the wide-form note for `x`/`y`).
area.__doc__ = make_docstring(area, append_dict=_cartesian_append_dict)
def bar(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    base=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    orientation=None,
    barmode="relative",
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a bar plot, each row of `data_frame` is represented as a rectangular
    mark.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(
        args=locals(),
        constructor=go.Bar,
        # Bar text placement is chosen automatically by the renderer;
        # barmode is a layout-level (not trace-level) attribute.
        trace_patch=dict(textposition="auto"),
        layout_patch=dict(barmode=barmode),
    )
# Overwrite the inline docstring with the generated per-parameter docs
# (including the wide-form note for `x`/`y`).
bar.__doc__ = make_docstring(bar, append_dict=_cartesian_append_dict)
def timeline(
    data_frame=None,
    x_start=None,
    x_end=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a timeline plot, each row of `data_frame` is represented as a rectangular
    mark on an x axis of type `date`, spanning from `x_start` to `x_end`.

    Returns the figure built by ``make_figure``.
    """
    # ``locals()`` snapshots all keyword arguments above as the figure spec;
    # do not introduce local variables before this call.
    return make_figure(
        args=locals(),
        # Unlike the other constructors, the trace type is the sentinel
        # string "timeline", resolved inside make_figure.
        constructor="timeline",
        # Horizontal bars overlaid (not stacked) along the date axis.
        trace_patch=dict(textposition="auto", orientation="h"),
        layout_patch=dict(barmode="overlay"),
    )
# Generate the full parameter documentation (no wide-form append_dict here:
# timeline uses x_start/x_end rather than a plain `x`).
timeline.__doc__ = make_docstring(timeline)
def histogram(
    data_frame=None, x=None, y=None, color=None, facet_row=None,
    facet_row_weights=None, facet_col=None, facet_col_weights=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, animation_frame=None,
    animation_group=None, category_orders=None, labels=None,
    color_discrete_sequence=None, color_discrete_map=None, marginal=None,
    opacity=None, orientation=None, barmode="relative", barnorm=None,
    histnorm=None, log_x=False, log_y=False, range_x=None, range_y=None,
    histfunc=None, cumulative=None, nbins=None, title=None, template=None,
    width=None, height=None,
):
    """
    In a histogram, rows of `data_frame` are grouped together into a
    rectangular mark to visualize the 1D distribution of an aggregate
    function `histfunc` (e.g. the count or sum) of the value `y` (or `x` if
    `orientation` is `'h'`).
    """
    hist_args = locals()
    return make_figure(
        args=hist_args,
        constructor=go.Histogram,
        trace_patch={
            "histnorm": histnorm,
            "histfunc": histfunc,
            "cumulative": {"enabled": cumulative},
        },
        layout_patch={"barmode": barmode, "barnorm": barnorm},
    )
histogram.__doc__ = make_docstring(
    histogram,
    append_dict=dict(
        x=["If `orientation` is `'h'`, these values are used as inputs to `histfunc`."]
        + _wide_mode_xy_append,
        y=["If `orientation` is `'v'`, these values are used as inputs to `histfunc`."]
        + _wide_mode_xy_append,
        histfunc=[
            "The arguments to this function are the values of `y`(`x`) if `orientation` is `'v'`(`'h'`).",
        ],
    ),
)
def violin(
    data_frame=None, x=None, y=None, color=None, facet_row=None,
    facet_row_weights=None, facet_col=None, facet_col_weights=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, custom_data=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    orientation=None, violinmode=None, log_x=False, log_y=False,
    range_x=None, range_y=None, points=None, box=False, title=None,
    template=None, width=None, height=None,
):
    """
    In a violin plot, rows of `data_frame` are grouped together into a
    curved mark that visualizes their distribution.
    """
    violin_args = locals()
    # x0/y0 of a single space keep un-positioned violins on a shared axis.
    return make_figure(
        args=violin_args,
        constructor=go.Violin,
        trace_patch={
            "points": points,
            "box": {"visible": box},
            "scalegroup": True,
            "x0": " ",
            "y0": " ",
        },
        layout_patch={"violinmode": violinmode},
    )
violin.__doc__ = make_docstring(violin, append_dict=_cartesian_append_dict)
def box(
    data_frame=None, x=None, y=None, color=None, facet_row=None,
    facet_row_weights=None, facet_col=None, facet_col_weights=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, custom_data=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    orientation=None, boxmode=None, log_x=False, log_y=False,
    range_x=None, range_y=None, points=None, notched=False, title=None,
    template=None, width=None, height=None,
):
    """
    In a box plot, rows of `data_frame` are grouped together into a
    box-and-whisker mark that visualizes their distribution.
    Each box spans from quartile 1 (Q1) to quartile 3 (Q3); the second
    quartile (Q2, the median) is marked by a line inside the box. By
    default the whiskers reach the box edges +/- 1.5 times the
    interquartile range (IQR: Q3-Q1); see "points" for other options.
    """
    box_args = locals()
    return make_figure(
        args=box_args,
        constructor=go.Box,
        trace_patch={"boxpoints": points, "notched": notched, "x0": " ", "y0": " "},
        layout_patch={"boxmode": boxmode},
    )
box.__doc__ = make_docstring(box, append_dict=_cartesian_append_dict)
def strip(
    data_frame=None, x=None, y=None, color=None, facet_row=None,
    facet_row_weights=None, facet_col=None, facet_col_weights=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, custom_data=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    orientation=None, stripmode=None, log_x=False, log_y=False,
    range_x=None, range_y=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a strip plot, each row of `data_frame` is drawn as a jittered
    mark within its category.
    """
    strip_args = locals()
    # A strip plot is a Box trace whose box is made fully transparent so
    # that only the jittered points remain visible.
    return make_figure(
        args=strip_args,
        constructor=go.Box,
        trace_patch={
            "boxpoints": "all",
            "pointpos": 0,
            "hoveron": "points",
            "fillcolor": "rgba(255,255,255,0)",
            "line": {"color": "rgba(255,255,255,0)"},
            "x0": " ",
            "y0": " ",
        },
        layout_patch={"boxmode": stripmode},
    )
strip.__doc__ = make_docstring(strip, append_dict=_cartesian_append_dict)
def scatter_3d(
    data_frame=None, x=None, y=None, z=None, color=None, symbol=None,
    size=None, text=None, hover_name=None, hover_data=None,
    custom_data=None, error_x=None, error_x_minus=None, error_y=None,
    error_y_minus=None, error_z=None, error_z_minus=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, size_max=None, color_discrete_sequence=None,
    color_discrete_map=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, symbol_sequence=None, symbol_map=None,
    opacity=None, log_x=False, log_y=False, log_z=False, range_x=None,
    range_y=None, range_z=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a 3D scatter plot, each row of `data_frame` is drawn as a symbol
    mark in 3D space.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatter3d)
scatter_3d.__doc__ = make_docstring(scatter_3d)
def line_3d(
    data_frame=None, x=None, y=None, z=None, color=None, line_dash=None,
    text=None, line_group=None, hover_name=None, hover_data=None,
    custom_data=None, error_x=None, error_x_minus=None, error_y=None,
    error_y_minus=None, error_z=None, error_z_minus=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    line_dash_sequence=None, line_dash_map=None, log_x=False, log_y=False,
    log_z=False, range_x=None, range_y=None, range_z=None, title=None,
    template=None, width=None, height=None,
):
    """
    In a 3D line plot, each row of `data_frame` becomes a vertex of
    a polyline mark in 3D space.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatter3d)
line_3d.__doc__ = make_docstring(line_3d)
def scatter_ternary(
    data_frame=None, a=None, b=None, c=None, color=None, symbol=None,
    size=None, text=None, hover_name=None, hover_data=None,
    custom_data=None, animation_frame=None, animation_group=None,
    category_orders=None, labels=None, color_discrete_sequence=None,
    color_discrete_map=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, symbol_sequence=None, symbol_map=None,
    opacity=None, size_max=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a ternary scatter plot, each row of `data_frame` is drawn as a
    symbol mark in ternary coordinates.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatterternary)
scatter_ternary.__doc__ = make_docstring(scatter_ternary)
def line_ternary(
    data_frame=None, a=None, b=None, c=None, color=None, line_dash=None,
    line_group=None, hover_name=None, hover_data=None, custom_data=None,
    text=None, animation_frame=None, animation_group=None,
    category_orders=None, labels=None, color_discrete_sequence=None,
    color_discrete_map=None, line_dash_sequence=None, line_dash_map=None,
    line_shape=None, title=None, template=None, width=None, height=None,
):
    """
    In a ternary line plot, each row of `data_frame` becomes a vertex
    of a polyline mark in ternary coordinates.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatterternary)
line_ternary.__doc__ = make_docstring(line_ternary)
def scatter_polar(
    data_frame=None, r=None, theta=None, color=None, symbol=None,
    size=None, hover_name=None, hover_data=None, custom_data=None,
    text=None, animation_frame=None, animation_group=None,
    category_orders=None, labels=None, color_discrete_sequence=None,
    color_discrete_map=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, symbol_sequence=None, symbol_map=None,
    opacity=None, direction="clockwise", start_angle=90, size_max=None,
    range_r=None, range_theta=None, log_r=False, render_mode="auto",
    title=None, template=None, width=None, height=None,
):
    """
    In a polar scatter plot, each row of `data_frame` is drawn as a
    symbol mark in polar coordinates.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatterpolar)
scatter_polar.__doc__ = make_docstring(scatter_polar)
def line_polar(
    data_frame=None, r=None, theta=None, color=None, line_dash=None,
    hover_name=None, hover_data=None, custom_data=None, line_group=None,
    text=None, animation_frame=None, animation_group=None,
    category_orders=None, labels=None, color_discrete_sequence=None,
    color_discrete_map=None, line_dash_sequence=None, line_dash_map=None,
    direction="clockwise", start_angle=90, line_close=False,
    line_shape=None, render_mode="auto", range_r=None, range_theta=None,
    log_r=False, title=None, template=None, width=None, height=None,
):
    """
    In a polar line plot, each row of `data_frame` becomes a vertex
    of a polyline mark in polar coordinates.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scatterpolar)
line_polar.__doc__ = make_docstring(line_polar)
def bar_polar(
    data_frame=None, r=None, theta=None, color=None, hover_name=None,
    hover_data=None, custom_data=None, base=None, animation_frame=None,
    animation_group=None, category_orders=None, labels=None,
    color_discrete_sequence=None, color_discrete_map=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, barnorm=None, barmode="relative",
    direction="clockwise", start_angle=90, range_r=None, range_theta=None,
    log_r=False, title=None, template=None, width=None, height=None,
):
    """
    In a polar bar plot, each row of `data_frame` is drawn as a wedge
    mark in polar coordinates.
    """
    fig_args = locals()
    return make_figure(
        args=fig_args,
        constructor=go.Barpolar,
        layout_patch={"barnorm": barnorm, "barmode": barmode},
    )
bar_polar.__doc__ = make_docstring(bar_polar)
def choropleth(
    data_frame=None, lat=None, lon=None, locations=None,
    locationmode=None, geojson=None, featureidkey=None, color=None,
    facet_row=None, facet_col=None, facet_col_wrap=0,
    facet_row_spacing=None, facet_col_spacing=None, hover_name=None,
    hover_data=None, custom_data=None, animation_frame=None,
    animation_group=None, category_orders=None, labels=None,
    color_discrete_sequence=None, color_discrete_map=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, projection=None, scope=None,
    center=None, fitbounds=None, basemap_visible=None, title=None,
    template=None, width=None, height=None,
):
    """
    In a choropleth map, each row of `data_frame` is drawn as a colored
    region mark on a map.
    """
    fig_args = locals()
    return make_figure(
        args=fig_args,
        constructor=go.Choropleth,
        trace_patch={"locationmode": locationmode},
    )
choropleth.__doc__ = make_docstring(choropleth)
def scatter_geo(
    data_frame=None, lat=None, lon=None, locations=None,
    locationmode=None, geojson=None, featureidkey=None, color=None,
    text=None, symbol=None, facet_row=None, facet_col=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, custom_data=None, size=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, symbol_sequence=None, symbol_map=None,
    opacity=None, size_max=None, projection=None, scope=None, center=None,
    fitbounds=None, basemap_visible=None, title=None, template=None,
    width=None, height=None,
):
    """
    In a geographic scatter plot, each row of `data_frame` is drawn as
    a symbol mark on a map.
    """
    fig_args = locals()
    return make_figure(
        args=fig_args,
        constructor=go.Scattergeo,
        trace_patch={"locationmode": locationmode},
    )
scatter_geo.__doc__ = make_docstring(scatter_geo)
def line_geo(
    data_frame=None, lat=None, lon=None, locations=None,
    locationmode=None, geojson=None, featureidkey=None, color=None,
    line_dash=None, text=None, facet_row=None, facet_col=None,
    facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None,
    hover_name=None, hover_data=None, custom_data=None, line_group=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    line_dash_sequence=None, line_dash_map=None, projection=None,
    scope=None, center=None, fitbounds=None, basemap_visible=None,
    title=None, template=None, width=None, height=None,
):
    """
    In a geographic line plot, each row of `data_frame` becomes a
    vertex of a polyline mark on a map.
    """
    fig_args = locals()
    return make_figure(
        args=fig_args,
        constructor=go.Scattergeo,
        trace_patch={"locationmode": locationmode},
    )
line_geo.__doc__ = make_docstring(line_geo)
def scatter_mapbox(
    data_frame=None, lat=None, lon=None, color=None, text=None,
    hover_name=None, hover_data=None, custom_data=None, size=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, opacity=None, size_max=None, zoom=8,
    center=None, mapbox_style=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a Mapbox scatter plot, each row of `data_frame` is drawn as a
    symbol mark on a Mapbox map.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scattermapbox)
scatter_mapbox.__doc__ = make_docstring(scatter_mapbox)
def choropleth_mapbox(
    data_frame=None, geojson=None, featureidkey=None, locations=None,
    color=None, hover_name=None, hover_data=None, custom_data=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, opacity=None, zoom=8, center=None,
    mapbox_style=None, title=None, template=None, width=None, height=None,
):
    """
    In a Mapbox choropleth map, each row of `data_frame` is drawn as a
    colored region on a Mapbox map.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Choroplethmapbox)
choropleth_mapbox.__doc__ = make_docstring(choropleth_mapbox)
def density_mapbox(
    data_frame=None, lat=None, lon=None, z=None, hover_name=None,
    hover_data=None, custom_data=None, animation_frame=None,
    animation_group=None, category_orders=None, labels=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, opacity=None, zoom=8, center=None,
    mapbox_style=None, radius=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a Mapbox density map, each row of `data_frame` contributes to the
    color intensity of the region around the corresponding map point.
    """
    fig_args = locals()
    return make_figure(
        args=fig_args,
        constructor=go.Densitymapbox,
        trace_patch={"radius": radius},
    )
density_mapbox.__doc__ = make_docstring(density_mapbox)
def line_mapbox(
    data_frame=None, lat=None, lon=None, color=None, text=None,
    hover_name=None, hover_data=None, custom_data=None, line_group=None,
    animation_frame=None, animation_group=None, category_orders=None,
    labels=None, color_discrete_sequence=None, color_discrete_map=None,
    zoom=8, center=None, mapbox_style=None, title=None, template=None,
    width=None, height=None,
):
    """
    In a Mapbox line plot, each row of `data_frame` becomes a vertex of
    a polyline mark on a Mapbox map.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Scattermapbox)
line_mapbox.__doc__ = make_docstring(line_mapbox)
def scatter_matrix(
    data_frame=None, dimensions=None, color=None, symbol=None, size=None,
    hover_name=None, hover_data=None, custom_data=None,
    category_orders=None, labels=None, color_discrete_sequence=None,
    color_discrete_map=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, symbol_sequence=None, symbol_map=None,
    opacity=None, size_max=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a scatter plot matrix (or SPLOM), each row of `data_frame` is
    drawn as multiple symbol marks, one in each cell of a grid of 2D
    scatter plots, which plot every pair of `dimensions` against each
    other.
    """
    fig_args = locals()
    # Rectangular selection is the natural interaction for a SPLOM.
    return make_figure(
        args=fig_args,
        constructor=go.Splom,
        layout_patch={"dragmode": "select"},
    )
scatter_matrix.__doc__ = make_docstring(scatter_matrix)
def parallel_coordinates(
    data_frame=None, dimensions=None, color=None, labels=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, title=None, template=None, width=None,
    height=None,
):
    """
    In a parallel coordinates plot, each row of `data_frame` is drawn as
    a polyline mark that traverses a set of parallel axes, one for each
    of the `dimensions`.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Parcoords)
parallel_coordinates.__doc__ = make_docstring(parallel_coordinates)
def parallel_categories(
    data_frame=None, dimensions=None, color=None, labels=None,
    color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, title=None, template=None, width=None,
    height=None, dimensions_max_cardinality=50,
):
    """
    In a parallel categories (or parallel sets) plot, each row of
    `data_frame` is grouped with other rows sharing the same values of
    `dimensions` and drawn as a polyline mark through a set of parallel
    axes, one for each of the `dimensions`.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Parcats)
parallel_categories.__doc__ = make_docstring(parallel_categories)
def pie(
    data_frame=None, names=None, values=None, color=None,
    color_discrete_sequence=None, color_discrete_map=None, hover_name=None,
    hover_data=None, custom_data=None, labels=None, title=None,
    template=None, width=None, height=None, opacity=None, hole=None,
):
    """
    In a pie plot, each row of `data_frame` is represented as a sector of a
    pie.
    """
    # Pie traces draw their colors from the layout-level `piecolorway`,
    # so a user-supplied discrete sequence is patched in there.
    if color_discrete_sequence is not None:
        layout_patch = {"piecolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    return make_figure(
        args=locals(),
        constructor=go.Pie,
        # Only show a legend when sectors are labelled via `names`.
        trace_patch=dict(showlegend=(names is not None), hole=hole),
        layout_patch=layout_patch,
    )
pie.__doc__ = make_docstring(
    pie,
    override_dict=dict(
        hole=[
            "float",
            # BUG FIX: the two implicitly-concatenated fragments previously
            # joined without a separating space ("...pie.Use this...").
            "Sets the fraction of the radius to cut out of the pie. "
            "Use this to make a donut chart.",
        ],
    ),
)
def sunburst(
    data_frame=None, names=None, values=None, parents=None, path=None,
    ids=None, color=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, color_discrete_sequence=None,
    color_discrete_map=None, hover_name=None, hover_data=None,
    custom_data=None, labels=None, title=None, template=None, width=None,
    height=None, branchvalues=None, maxdepth=None,
):
    """
    A sunburst plot represents hierarchical data as sectors laid out over
    several levels of concentric rings.
    """
    # Sunburst traces take their colors from the layout-level colorway.
    if color_discrete_sequence is not None:
        layout_patch = {"sunburstcolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    # `path` is a convenience alternative to explicit `ids`/`parents`;
    # supplying both is ambiguous and therefore rejected.
    if path is not None and (ids is not None or parents is not None):
        raise ValueError(
            # BUG FIX: the two message fragments previously concatenated
            # without a separating space ("...parents.These parameters...").
            "Either `path` should be provided, or `ids` and `parents`. "
            "These parameters are mutually exclusive and cannot be passed together."
        )
    # With a `path` hierarchy, values of inner sectors are totals of their
    # leaves, so default `branchvalues` accordingly.
    if path is not None and branchvalues is None:
        branchvalues = "total"
    return make_figure(
        args=locals(),
        constructor=go.Sunburst,
        trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),
        layout_patch=layout_patch,
    )
sunburst.__doc__ = make_docstring(sunburst)
def treemap(
    data_frame=None, names=None, values=None, parents=None, ids=None,
    path=None, color=None, color_continuous_scale=None, range_color=None,
    color_continuous_midpoint=None, color_discrete_sequence=None,
    color_discrete_map=None, hover_name=None, hover_data=None,
    custom_data=None, labels=None, title=None, template=None, width=None,
    height=None, branchvalues=None, maxdepth=None,
):
    """
    A treemap plot represents hierarchical data as nested rectangular
    sectors.
    """
    # Treemap traces take their colors from the layout-level colorway.
    if color_discrete_sequence is not None:
        layout_patch = {"treemapcolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    # `path` is a convenience alternative to explicit `ids`/`parents`;
    # supplying both is ambiguous and therefore rejected.
    if path is not None and (ids is not None or parents is not None):
        raise ValueError(
            # BUG FIX: the two message fragments previously concatenated
            # without a separating space ("...parents.These parameters...").
            "Either `path` should be provided, or `ids` and `parents`. "
            "These parameters are mutually exclusive and cannot be passed together."
        )
    # With a `path` hierarchy, values of inner sectors are totals of their
    # leaves, so default `branchvalues` accordingly.
    if path is not None and branchvalues is None:
        branchvalues = "total"
    return make_figure(
        args=locals(),
        constructor=go.Treemap,
        trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),
        layout_patch=layout_patch,
    )
treemap.__doc__ = make_docstring(treemap)
def funnel(
    data_frame=None, x=None, y=None, color=None, facet_row=None,
    facet_col=None, facet_col_wrap=0, facet_row_spacing=None,
    facet_col_spacing=None, hover_name=None, hover_data=None,
    custom_data=None, text=None, animation_frame=None,
    animation_group=None, category_orders=None, labels=None,
    color_discrete_sequence=None, color_discrete_map=None, opacity=None,
    orientation=None, log_x=False, log_y=False, range_x=None,
    range_y=None, title=None, template=None, width=None, height=None,
):
    """
    In a funnel plot, each row of `data_frame` is drawn as a
    rectangular sector of a funnel.
    """
    fig_args = locals()
    return make_figure(args=fig_args, constructor=go.Funnel)
funnel.__doc__ = make_docstring(funnel, append_dict=_cartesian_append_dict)
def funnel_area(
    data_frame=None, names=None, values=None, color=None,
    color_discrete_sequence=None, color_discrete_map=None, hover_name=None,
    hover_data=None, custom_data=None, labels=None, title=None,
    template=None, width=None, height=None, opacity=None,
):
    """
    In a funnel area plot, each row of `data_frame` is drawn as a
    trapezoidal sector of a funnel.
    """
    # Funnelarea traces draw colors from the layout-level colorway, so a
    # user-supplied discrete sequence is patched in there.
    layout_patch = (
        {"funnelareacolorway": color_discrete_sequence}
        if color_discrete_sequence is not None
        else {}
    )
    return make_figure(
        args=locals(),
        constructor=go.Funnelarea,
        # Only show a legend when sectors are labelled via `names`.
        trace_patch={"showlegend": (names is not None)},
        layout_patch=layout_patch,
    )
funnel_area.__doc__ = make_docstring(funnel_area)
| 22.886132
| 109
| 0.663452
| 4,702
| 35,977
| 4.823479
| 0.075287
| 0.049603
| 0.043474
| 0.030556
| 0.832496
| 0.811949
| 0.791314
| 0.776367
| 0.764638
| 0.744444
| 0
| 0.003149
| 0.240987
| 35,977
| 1,571
| 110
| 22.9007
| 0.827407
| 0.120911
| 0
| 0.864446
| 0
| 0.000775
| 0.041108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025562
| false
| 0.001549
| 0.002324
| 0
| 0.053447
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
133899e7057e6fe43f103a2956eefd84d45a77cd
| 27,847
|
py
|
Python
|
code/scipy2017/time_procedures.py
|
EVS-ATMOS/cmdv-rrm-anl
|
fde36aa21037bcdf396df0517d463fb4107fab82
|
[
"MIT"
] | null | null | null |
code/scipy2017/time_procedures.py
|
EVS-ATMOS/cmdv-rrm-anl
|
fde36aa21037bcdf396df0517d463fb4107fab82
|
[
"MIT"
] | 4
|
2016-10-13T16:31:32.000Z
|
2018-11-30T15:01:14.000Z
|
code/scipy2017/time_procedures.py
|
rcjackson/cmdv-rrm-anl
|
1d73d2dc2cb3b86de43c817fe340d0b550e2e04b
|
[
"MIT"
] | 2
|
2016-10-13T15:42:57.000Z
|
2016-10-13T16:39:16.000Z
|
# This module handles all of the time lookups for soundings and radar
# All of the file_name_str entries will have to be adjusted to fit to
# your radar dataset's naming convention
import glob
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import pyart
import time
from copy import deepcopy
# Filesystem locations for raw radar volumes, gridded products, soundings,
# and generated output on the LCRC cluster.
# NOTE(review): `berr_data_file_path` and `data_path_cpol` deliberately keep
# the doubled slash ("stage//radar_disk_two", "radar_disk_two//cpol_rapic")
# that the original string concatenation produced; POSIX treats "//" as "/".
data_path_berr = '/lcrc/group/earthscience/radar/stage/radar_disk_two/berr_rapic/'
out_data_path = '/lcrc/group/earthscience/rjackson/multidop_grids/'
cpol_grid_data_path = '/lcrc/group/earthscience/rjackson/data/radar/grids/'
data_path_sounding = '/lcrc/group/earthscience/rjackson/soundings/'
berr_data_file_path = '/lcrc/group/earthscience/radar/stage//radar_disk_two/berr_rapic/'
data_path_cpol = '/lcrc/group/earthscience/radar/stage/radar_disk_two//cpol_rapic/'
data_path_cpol_cfradial = '/lcrc/group/earthscience/rjackson/cpol'
data_path_berr_cfradial = '/lcrc/group/earthscience/rjackson/berr'
out_file_path = '/lcrc/group/earthscience/rjackson/quicklook_plots/'
# Get a Radar object given a time period in the CPOL dataset
def get_radar_from_berr(time):
    """Read and return the Berrima radar volume for a given scan time.

    The scan time's fields are zero-padded into the dealiased UF file name
    ``BerrimaVol<YYYYMMDD>_<HHMMSS>_deal.uf`` under ``data_path_berr``;
    adjust the pattern to match your dataset's naming convention.

    Parameters
    ----------
    time : datetime.datetime
        Scan time of the requested volume.  NOTE(review): this parameter
        shadows the module-level ``time`` import; kept for backward
        compatibility with existing callers.

    Returns
    -------
    Radar object read by ``pyart.io.read``.
    """
    # Removed the unused `from datetime import timedelta, datetime` that
    # the original carried here.
    file_name_str = (data_path_berr +
                     'BerrimaVol' +
                     '%04d%02d%02d_%02d%02d%02d' % (
                         time.year, time.month, time.day,
                         time.hour, time.minute, time.second) +
                     '_deal.uf')
    radar = pyart.io.read(file_name_str)
    return radar
""" get_grid_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Grid classes between
start_time and end_time """
def get_grid_times_cpol(start_year, start_month, start_day,
start_hour, start_minute, end_year,
end_month, end_day, end_hour,
end_minute, minute_interval=5):
from datetime import timedelta, datetime
start_time = datetime(start_year,
start_month,
start_day,
start_hour,
start_minute,
)
end_time = datetime(end_year,
end_month,
end_day,
end_hour,
end_minute,
)
deltatime = end_time - start_time
if(deltatime.seconds > 0 or deltatime.minute > 0):
no_days = deltatime.days + 1
else:
no_days = deltatime.days
if(start_day != end_day):
no_days = no_days + 1
days = range(0, no_days)
print('We are about to load grid files for ' + str(no_days) + ' days')
# Find the list of files for each day
cur_time = start_time
file_list = []
time_list = []
date_list_final = []
for i in days:
year_str = "%04d" % cur_time.year
day_str = "%02d" % cur_time.day
month_str = "%02d" % cur_time.month
dir_str = year_str + '/' + month_str + '/' + day_str + '/'
format_str = (cpol_grid_data_path + dir_str + 'cpol_' + year_str +
month_str + day_str + '*.nc')
print('Looking for files with format ' + format_str)
data_list = glob.glob(format_str)
if(len(data_list) > 0):
day = datetime(cur_time.year, cur_time.month, cur_time.day,
0, 0, 1)
date_list_final.append(day)
for j in range(0, len(data_list)):
file_list.append(data_list[j])
cur_time = cur_time + timedelta(days=1)
# Parse all of the dates and time in the interval
# and add them to the time list
past_time = []
for file_name in file_list:
date_str = file_name[-15:-3]
year_str = date_str[0:4]
month_str = date_str[4:6]
day_str = date_str[6:8]
hour_str = date_str[8:10]
minute_str = date_str[10:12]
cur_time = datetime(int(year_str), int(month_str), int(day_str),
int(hour_str), int(minute_str), 0)
time_list.append(cur_time)
# Sort time list and make sure time are at least xx min apart
time_list.sort()
time_list_sorted = deepcopy(time_list)
time_list_final = []
past_time = []
for times in time_list_sorted:
cur_time = times
if(past_time == []):
past_time = cur_time
if(cur_time - past_time >= timedelta(minutes=minute_interval) and
cur_time >= start_time and cur_time <= end_time):
time_list_final.append(cur_time)
past_time = cur_time
return time_list_final
""" get_radar_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Radar
classes between start_time and end_time. """
def get_radar_times_cpol(start_year, start_month, start_day,
start_hour, start_minute, end_year,
end_month, end_day, end_hour,
end_minute, minute_interval=1):
from datetime import timedelta, datetime
from parse import parse
start_time = datetime(start_year, start_month, start_day,
start_hour, start_minute,)
end_time = datetime(end_year, end_month, end_day,
end_hour, end_minute,)
deltatime = end_time - start_time
if(deltatime.seconds > 0 or deltatime.minute > 0):
no_days = deltatime.days + 1
else:
no_days = deltatime.days
if(start_day != end_day):
no_days = no_days + 1
days = range(0, no_days)
print(('We are about to load grid files for ' +
str(no_days) + ' days'))
# Find the list of files for each day
cur_time = start_time
file_list = []
time_list = []
date_list_final = []
for i in days:
year_str = "%04d" % cur_time.year
day_str = "%02d" % cur_time.day
month_str = "%02d" % cur_time.month
# Adjust to your dataset
if(cur_time.year > 2007):
format_str = (data_path_cpol_cfradial + '/' + year_str + '/' +
year_str + month_str + day_str + '/cfrad.' +
year_str + month_str + day_str + '*UNKNOWN_SUR.nc')
else:
format_str = (data_path_cpol_cfradial + '/' + year_str + '/' +
year_str + month_str + day_str + '/Gunn_pt*' +
year_str + month_str + day_str + '*ppi.nc')
print('Looking for files with format ' + format_str)
data_list = glob.glob(format_str)
if(len(data_list) > 0):
day = datetime(cur_time.year, cur_time.month, cur_time.day,
0, 0, 1)
date_list_final.append(day)
for j in range(0, len(data_list)):
file_list.append(data_list[j])
cur_time = cur_time + timedelta(days=1)
# Parse all of the dates and time in the interval and
# add them to the time list
past_time = []
for file_name in file_list:
if(not file_name[-6:] == 'ppi.nc'):
new_format_str = (data_path_cpol_cfradial +
'/{:d}/{:d}/' +
'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}' +
'.{:d}_Gunn_Pt_v{:d}_UNKNOWN_SUR.nc')
print(file_name)
parameters = parse(new_format_str, file_name)
year_str = np.floor(parameters[2]/10000)
month_str = np.floor((parameters[2] - year_str*10000)/100)
day_str = np.floor(parameters[2] - year_str*10000 - month_str*100)
hour_str = np.floor(parameters[3]/10000)
minute_str = np.floor((parameters[3] - hour_str*10000)/100)
second_str = np.floor(parameters[3] - hour_str*10000 -
minute_str*100)
else:
date_str = file_name[-20:-6]
year_str = date_str[0:4]
month_str = date_str[4:6]
day_str = date_str[6:8]
hour_str = date_str[8:10]
minute_str = date_str[10:12]
second_str = date_str[12:14]
print(year_str)
cur_time = datetime(int(year_str), int(month_str), int(day_str),
int(hour_str), int(minute_str), int(second_str))
time_list.append(cur_time)
# Sort time list and make sure time are at least xx min apart
time_list.sort()
time_list_sorted = deepcopy(time_list)
time_list_final = []
past_time = []
for times in time_list_sorted:
cur_time = times
if(past_time == []):
past_time = cur_time
if(cur_time >= start_time and cur_time <= end_time):
time_list_final.append(cur_time)
past_time = cur_time
return time_list_final, date_list_final
""" get_radar_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Radar classes
between start_time and end_time """
def get_radar_times_berr(start_year, start_month, start_day,
                         start_hour, start_minute, end_year,
                         end_month, end_day, end_hour,
                         end_minute, minute_interval=5):
    """Return the Berrima radar scan times between a start and end time.

    Parameters mirror the components of a start and an end datetime.
    minute_interval is accepted for interface compatibility but is not
    used here: Berrima full volumes are selected by keeping only scans
    whose minute is a multiple of 10.

    Returns
    -------
    time_list_final : list of datetime
        Sorted scan times inside [start_time, end_time].
    date_list_final : list of datetime
        One datetime (at 00:00:01) per day that had at least one file.
    """
    from datetime import timedelta, datetime
    from parse import parse
    start_time = datetime(start_year, start_month, start_day,
                          start_hour, start_minute)
    end_time = datetime(end_year, end_month, end_day, end_hour, end_minute)
    deltatime = end_time - start_time
    # FIX: timedelta has no 'minute' attribute; the original raised
    # AttributeError whenever deltatime.seconds == 0.  .seconds already
    # contains the hour/minute remainder, so testing it alone suffices.
    if deltatime.seconds > 0:
        no_days = deltatime.days + 1
    else:
        no_days = deltatime.days
    if start_day != end_day:
        no_days = no_days + 1
    print('We are about to load grid files for ' + str(no_days) + ' days')
    # Collect the candidate file names, one day at a time.
    cur_time = start_time
    file_list = []
    time_list = []
    date_list_final = []
    for i in range(0, no_days):
        year_str = "%04d" % cur_time.year
        day_str = "%02d" % cur_time.day
        month_str = "%02d" % cur_time.month
        # Adjust to your dataset
        format_str = (data_path_berr_cfradial +
                      '/' + year_str + '/' + year_str +
                      month_str + day_str + '/cfrad.' +
                      year_str + month_str + day_str + '*.nc')
        print('Looking for files with format ' + format_str)
        data_list = glob.glob(format_str)
        if len(data_list) > 0:
            day = datetime(cur_time.year, cur_time.month, cur_time.day,
                           0, 0, 1)
            date_list_final.append(day)
        for j in range(0, len(data_list)):
            file_list.append(data_list[j])
        cur_time = cur_time + timedelta(days=1)
    # Parse the date and time encoded in each file name; two naming
    # schemes exist depending on the file suffix.
    for file_name in file_list:
        if file_name[-13:] == 'el0.50_SUR.nc':
            new_format_str = (data_path_berr_cfradial +
                              '/{:d}/{:d}/' +
                              'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}.' +
                              '{:d}_Berr_v{:d}_s{:d}_el0.50_SUR.nc')
        else:
            new_format_str = (data_path_berr_cfradial +
                              '/{:d}/{:d}/' +
                              'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}.' +
                              '{:d}_Berrima_v{:d}_UNKNOWN_SUR.nc')
        parameters = parse(new_format_str, file_name)
        # parameters[2] is YYYYMMDD and parameters[3] is HHMMSS as
        # integers; split them into components arithmetically.
        year_str = np.floor(parameters[2]/10000)
        month_str = np.floor((parameters[2] - year_str*10000)/100)
        day_str = np.floor(parameters[2] - year_str*10000 - month_str*100)
        hour_str = np.floor(parameters[3]/10000)
        minute_str = np.floor((parameters[3] - hour_str*10000)/100)
        second_str = np.floor(parameters[3] - hour_str*10000 -
                              minute_str*100)
        cur_time = datetime(int(year_str), int(month_str), int(day_str),
                            int(hour_str), int(minute_str), int(second_str))
        # Only interested in scans at 10's of minutes since
        # they are full volumes -- adjust for your dataset
        if int(minute_str) % 10 == 0:
            time_list.append(cur_time)
    # Keep only times inside the requested window, in sorted order.
    # (The original's past_time/deepcopy bookkeeping was dead code.)
    time_list.sort()
    time_list_final = []
    for cur_time in time_list:
        if start_time <= cur_time <= end_time:
            time_list_final.append(cur_time)
    return time_list_final, date_list_final
# Write to cfradial file given a time (useful for adding fields)
def write_radar_to_cpol(radar, time):
    """Overwrite the CPOL cfradial file whose name matches *time*.

    Useful for persisting fields added to an in-memory Radar object.
    Pre-2008 files use the 'Gunn_pt_' naming scheme; later files use
    the 'cfrad.' scheme.
    """
    import glob
    y = "%04d" % time.year
    mo = "%02d" % time.month
    d = "%02d" % time.day
    h = "%02d" % time.hour
    mi = "%02d" % time.minute
    s = "%02d" % time.second
    if time.year > 2007:
        pattern = (data_path_cpol_cfradial + '/' + y + '/' + y + mo + d +
                   '/cfrad.' + y + mo + d + '_' + h + mi + '*.nc')
    else:
        pattern = (data_path_cpol_cfradial + '/' + y + '/' + y + mo + d +
                   '/Gunn_pt_' + y + mo + d + h + mi + s + '*.nc')
    matches = glob.glob(pattern)
    # First glob hit is assumed to be the target file.
    pyart.io.write_cfradial(matches[0], radar)
def write_radar_to_berr(radar, time):
    """Overwrite the Berrima cfradial file whose name matches *time*."""
    import glob
    y = "%04d" % time.year
    mo = "%02d" % time.month
    d = "%02d" % time.day
    h = "%02d" % time.hour
    mi = "%02d" % time.minute
    pattern = (data_path_berr_cfradial + '/' + y + '/' + y + mo + d +
               '/cfrad.' + y + mo + d + '_' + h + mi + '*.nc')
    matches = glob.glob(pattern)
    # First glob hit is assumed to be the target file.
    pyart.io.write_cfradial(matches[0], radar)
# Get a Radar object given a time period in the CPOL dataset
def get_radar_from_cpol(time):
    """Read and return the CPOL cfradial volume recorded at *time*.

    Pre-2008 files use the 'Gunn_pt_' naming scheme; later files use
    the 'cfrad.' scheme.
    """
    import glob
    y = "%04d" % time.year
    mo = "%02d" % time.month
    d = "%02d" % time.day
    h = "%02d" % time.hour
    mi = "%02d" % time.minute
    if time.year <= 2007:
        pattern = (data_path_cpol_cfradial + '/' + y + '/' + y + mo + d +
                   '/Gunn_pt_' + y + mo + d + h + mi + '*.nc')
    else:
        pattern = (data_path_cpol_cfradial + '/' + y + '/' + y + mo + d +
                   '/cfrad.' + y + mo + d + '_' + h + mi + '*.nc')
    print('Opening ' + pattern)
    matches = glob.glob(pattern)
    return pyart.io.read(matches[0])
# Get a Radar object given a time period in the CPOL dataset
def get_radar_from_berr(time):
    """Read and return the Berrima cfradial volume recorded at *time*."""
    import glob
    y = "%04d" % time.year
    mo = "%02d" % time.month
    d = "%02d" % time.day
    h = "%02d" % time.hour
    mi = "%02d" % time.minute
    pattern = (data_path_berr_cfradial + '/' + y + '/' + y + mo + d +
               '/cfrad.' + y + mo + d + '_' + h + mi + '*.nc')
    matches = glob.glob(pattern)
    print(pattern)
    print(matches)
    return pyart.io.read(matches[0])
# Get a Radar object given a time period in the CPOL dataset
def get_grid_from_cpol(time):
    """Read and return the gridded CPOL product for *time*.

    Grid files live under <cpol_grid_data_path>/YYYY/MM/DD/ and are
    named cpol_YYYYMMDDHHMM.nc.
    """
    stamp = "%04d%02d%02d%02d%02d" % (time.year, time.month, time.day,
                                      time.hour, time.minute)
    file_name_str = (cpol_grid_data_path + '/' + stamp[:4] + '/' +
                     stamp[4:6] + '/' + stamp[6:8] + '/cpol_' +
                     stamp + '.nc')
    print(file_name_str)
    return pyart.io.read_grid(file_name_str)
def grid_radar(radar, grid_shape=(20, 301, 301), xlim=(-150000, 150000),
               ylim=(-150000, 150000), zlim=(1000, 20000), bsp=1.0,
               min_radius=750, h_factor=4.0, nb=1.5, gatefilter=False,
               fields=None, origin=None):
    """Map a single radar onto a Cartesian grid with Cressman weighting.

    Parameters
    ----------
    radar : pyart Radar
        Radar volume to grid.
    grid_shape : tuple
        (nz, ny, nx) number of grid points.
    xlim, ylim, zlim : tuple
        Grid extents in meters.
    bsp, min_radius, h_factor, nb : float
        Radius-of-influence parameters passed through to Py-ART.
    gatefilter : pyart GateFilter or False
        Filter applied while gridding (False disables filtering).
    fields : list of str, optional
        Field names to grid; defaults to ['DT', 'VT'].
    origin : tuple, optional
        (lat, lon) grid origin; defaults to the radar location.

    Returns
    -------
    grid : pyart Grid
    """
    # FIX: the original used a mutable default argument (fields=['DT',
    # 'VT']), a shared-state pitfall; default is now resolved per call.
    if fields is None:
        fields = ['DT', 'VT']
    bt = time.time()
    radar_list = [radar]
    if origin is None:
        origin = (radar.latitude['data'][0],
                  radar.longitude['data'][0])
    grid = pyart.map.grid_from_radars(
        radar_list, grid_shape=grid_shape,
        grid_limits=(zlim, ylim, xlim),
        grid_origin=origin, fields=fields,
        weighting_function='Cressman',
        gridding_algo='map_gates_to_grid',
        h_factor=h_factor,
        min_radius=min_radius,
        bsp=bsp,
        nb=nb,
        gatefilters=[gatefilter])
    print(time.time() - bt, 'seconds to grid radar')
    return grid
def find_nearest(array, value):
    """Return the index of the element of *array* closest to *value*.

    *array* must be sorted in ascending order.  When *value* is exactly
    halfway between two elements, the larger element's index is
    returned (the '<' comparison breaks the tie to the right).

    Parameters
    ----------
    array : sequence of numbers, sorted ascending
    value : number

    Returns
    -------
    int : index into *array* of the nearest element.
    """
    # FIX: import math locally so the function no longer depends on a
    # module-level import that is not visible near this definition.
    import math
    import numpy
    idx = numpy.searchsorted(array, value, side="left")
    # idx is the insertion point; the nearest element is either
    # array[idx-1] or array[idx] (when the latter exists).
    if idx > 0 and (idx == len(array) or
                    math.fabs(value - array[idx - 1]) <
                    math.fabs(value - array[idx])):
        return idx - 1
    return idx
""" get_sounding_times
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of sounding times between
start_time and end_time. """
def get_sounding_times(start_year, start_month, start_day,
                       start_hour, start_minute, end_year,
                       end_month, end_day, end_hour,
                       end_minute, minute_interval=5):
    """Return sounding launch times between a start and an end time.

    Parameters mirror the components of a start and an end datetime.
    Consecutive returned times are at least *minute_interval* minutes
    apart.

    Returns
    -------
    list of datetime : sorted times inside [start_time, end_time].
    """
    from datetime import timedelta, datetime
    start_time = datetime(start_year, start_month, start_day,
                          start_hour, start_minute)
    end_time = datetime(end_year, end_month, end_day, end_hour, end_minute)
    deltatime = end_time - start_time
    # FIX: timedelta has no 'minutes' attribute; the original raised
    # AttributeError whenever deltatime.seconds == 0.  .seconds already
    # contains the minute remainder, so testing it alone suffices.
    if deltatime.seconds > 0:
        no_days = deltatime.days + 1
    else:
        no_days = deltatime.days
    if start_day != end_day:
        no_days = no_days + 1
    print('We are about to load sounding files for ' + str(no_days) + ' days')
    # Collect candidate files, one day at a time.
    cur_time = start_time
    file_list = []
    time_list = []
    for i in np.arange(0, no_days, 1):
        year_str = "%04d" % cur_time.year
        day_str = "%02d" % cur_time.day
        month_str = "%02d" % cur_time.month
        format_str = (data_path_sounding +
                      'twpsondewnpnC3.b1.' +
                      year_str + month_str + day_str +
                      '*custom.cdf')
        for found in glob.glob(format_str):
            file_list.append(found)
        cur_time = cur_time + timedelta(days=1)
    # File names embed 'YYYYMMDD.HHMMSS' at characters [-26:-11].
    for file_name in file_list:
        date_str = file_name[-26:-11]
        cur_time = datetime(int(date_str[0:4]), int(date_str[4:6]),
                            int(date_str[6:8]), int(date_str[9:11]),
                            int(date_str[11:13]), int(date_str[13:15]))
        time_list.append(cur_time)
    # Sort and keep times in window, at least minute_interval apart.
    time_list.sort()
    time_list_final = []
    past_time = []
    for cur_time in time_list:
        if past_time == []:
            past_time = cur_time
        # NOTE(review): because past_time starts equal to the first
        # time, the first sounding in the window is skipped whenever
        # minute_interval > 0 -- confirm this is intended.
        if (cur_time - past_time >= timedelta(minutes=minute_interval) and
                start_time <= cur_time <= end_time):
            time_list_final.append(cur_time)
            past_time = cur_time
    return time_list_final
# Get a sounding object given a time period in the CPOL dataset
def get_sounding(time):
    """Open and return the ARM sounding file named after *time*.

    The file name embeds the exact launch timestamp, so *time* must
    match a time returned by get_sounding_times.
    """
    from netCDF4 import Dataset
    date_part = "%04d%02d%02d" % (time.year, time.month, time.day)
    tod_part = "%02d%02d%02d" % (time.hour, time.minute, time.second)
    file_name_str = (data_path_sounding + 'twpsondewnpnC3.b1.' +
                     date_part + '.' + tod_part + '.custom.cdf')
    return Dataset(file_name_str, mode='r')
# get_radar_times
# start_year = Start year of animation
# start_month = Start month of animation
# start_day = Start day of animation
# start_hour = Start hour of animation
# end_year = End year of animation
# end_month = End month of animation
# end_day = End day of animation
# end_minute = End minute of animation
# minute_interval = Interval in minutes between scans (default is 5)
# This procedure acquires a datetime array between start_time and end_time
def get_dda_times(start_year, start_month, start_day,
                  start_hour, start_minute, end_year,
                  end_month, end_day, end_hour,
                  end_minute, minute_interval=5):
    """Return the times of multi-Doppler grids between a start and end.

    The archive under out_data_path/ddop is globbed one day at a time;
    each file name encodes YYYYMMDDHHMM in its last 15 characters.
    Consecutive returned times are at least *minute_interval* minutes
    apart.

    Returns
    -------
    list of datetime : sorted times inside [start_time, end_time].
    """
    from datetime import timedelta, datetime
    start_time = datetime(start_year, start_month, start_day,
                          start_hour, start_minute)
    end_time = datetime(end_year, end_month, end_day, end_hour,
                        end_minute)
    deltatime = end_time - start_time
    # FIX: timedelta has no 'minute' attribute; the original raised
    # AttributeError whenever deltatime.seconds == 0.  .seconds already
    # contains the minute remainder, so testing it alone suffices.
    if deltatime.seconds > 0:
        no_days = deltatime.days + 1
    else:
        no_days = deltatime.days
    if start_day != end_day:
        no_days = no_days + 1
    print('We are about to load grid files for ' + str(no_days) + ' days')
    # Collect candidate files, one day at a time.
    cur_time = start_time
    file_list = []
    time_list = []
    for i in np.arange(0, no_days, 1):
        year_str = "%04d" % cur_time.year
        day_str = "%02d" % cur_time.day
        month_str = "%02d" % cur_time.month
        format_str = (out_data_path +
                      '/ddop/cf_compliant_grid' +
                      year_str + month_str + day_str + '*.nc')
        print('Looking for files with format ' + format_str)
        for found in glob.glob(format_str):
            file_list.append(found)
        cur_time = cur_time + timedelta(days=1)
    # File names embed YYYYMMDDHHMM at characters [-15:-3].
    for file_name in file_list:
        date_str = file_name[-15:-3]
        cur_time = datetime(int(date_str[0:4]), int(date_str[4:6]),
                            int(date_str[6:8]), int(date_str[8:10]),
                            int(date_str[10:12]), 0)
        time_list.append(cur_time)
    # Sort and keep times in window, at least minute_interval apart.
    time_list.sort()
    time_list_final = []
    past_time = []
    for cur_time in time_list:
        if past_time == []:
            past_time = cur_time
        # NOTE(review): because past_time starts equal to the first
        # time, the first grid in the window is skipped whenever
        # minute_interval > 0 -- confirm this is intended.
        if (cur_time - past_time >= timedelta(minutes=minute_interval) and
                start_time <= cur_time <= end_time):
            time_list_final.append(cur_time)
            past_time = cur_time
    return time_list_final
# Get a Grid object given a time period in the Multidop dataset
def get_grid_from_dda(time):
    """Read and return the multi-Doppler Grid saved for *time*.

    Files live under <out_data_path>/ddop/ and are named
    cf_compliant_gridYYYYMMDDHHMM.nc.
    """
    stamp = "%04d%02d%02d%02d%02d" % (time.year, time.month, time.day,
                                      time.hour, time.minute)
    file_name_str = (out_data_path + '/ddop/cf_compliant_grid' +
                     stamp + '.nc')
    return pyart.io.read_grid(file_name_str)
def write_grid(time, grid):
    """Write *grid* to the ddop archive path derived from *time*.

    The target file is <out_data_path>/ddop/
    cf_compliant_gridYYYYMMDDHHMM.nc.
    """
    stamp = "%04d%02d%02d%02d%02d" % (time.year, time.month, time.day,
                                      time.hour, time.minute)
    target = out_data_path + '/ddop/cf_compliant_grid' + stamp + '.nc'
    # Grid.write's return value (typically None) is passed through to
    # preserve the original contract.
    return grid.write(target)
| 35.249367
| 78
| 0.576687
| 3,696
| 27,847
| 4.055736
| 0.065476
| 0.038759
| 0.03002
| 0.028019
| 0.888659
| 0.868913
| 0.852835
| 0.846164
| 0.837292
| 0.83469
| 0
| 0.024516
| 0.326211
| 27,847
| 789
| 79
| 35.294043
| 0.774396
| 0.073976
| 0
| 0.752182
| 0
| 0
| 0.072535
| 0.029917
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027923
| false
| 0
| 0.04363
| 0
| 0.097731
| 0.027923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
135ab2dcd097c0f54b4e821dbfc89c61a16364ad
| 456
|
py
|
Python
|
test_unit/test_S3.py
|
JosselinLuneau/BachelorDIM-Lectures-Algorithms-2019
|
0af5fe53379c4691dbe8c1f025d84822d4838f9a
|
[
"MIT"
] | null | null | null |
test_unit/test_S3.py
|
JosselinLuneau/BachelorDIM-Lectures-Algorithms-2019
|
0af5fe53379c4691dbe8c1f025d84822d4838f9a
|
[
"MIT"
] | null | null | null |
test_unit/test_S3.py
|
JosselinLuneau/BachelorDIM-Lectures-Algorithms-2019
|
0af5fe53379c4691dbe8c1f025d84822d4838f9a
|
[
"MIT"
] | null | null | null |
import pytest
import scripts.S3_imgproc_tools as S3
import numpy as np
def test_invert_colors_manual_slow():
    """Inverting an all-white RGB image must yield an all-black one."""
    white = np.full((1, 3, 3), 255)
    expected = np.zeros((1, 3, 3), dtype=int)
    assert (S3.invert_colors_manual_slow(white) == expected).prod()
def test_invert_colors_manual_fast():
    """Fast inversion of an all-white array must yield all zeros."""
    white = np.full((2, 3), 255)
    expected = np.zeros((2, 3), dtype=int)
    assert (S3.invert_colors_manual_fast(white) == expected).prod()
| 35.076923
| 98
| 0.657895
| 81
| 456
| 3.506173
| 0.271605
| 0.274648
| 0.348592
| 0.380282
| 0.795775
| 0.746479
| 0.746479
| 0.535211
| 0.524648
| 0.309859
| 0
| 0.160401
| 0.125
| 456
| 12
| 99
| 38
| 0.551378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.222222
| false
| 0
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
1362a626bc8c2b67809f3ace50687d3a3e51014f
| 4,756
|
py
|
Python
|
userbot/modules/antiflood.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/antiflood.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/antiflood.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00s\xbe\x00\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x02l\x01m\x02Z\x02\x01\x00d\x00d\x03l\x03m\x04Z\x04\x01\x00d\x00d\x04l\x05m\x06Z\x06\x01\x00d\x00d\x05l\x07m\x08Z\x08\x01\x00d\x00d\x01l\tm\n\x02\x00\x01\x00m\x0b\x02\x00\x01\x00m\x0cZ\r\x01\x00d\x00d\x06l\x0em\x0fZ\x0f\x01\x00d\x00d\x07l\x10m\x11Z\x11m\x12Z\x12\x01\x00e\r\xa0\x13\xa1\x00Z\x14e\x06d\x01d\x01d\x08d\t\x8d\x03Z\x15e\x0fd\x08d\x08d\x08d\n\x8d\x03d\x0bd\x0c\x84\x00\x83\x01Z\x16e\x0fd\x08d\rd\x0e\x8d\x02d\x0fd\x0c\x84\x00\x83\x01Z\x16e\x11\xa0\x17d\x10d\x11i\x01\xa1\x01\x01\x00d\x01S\x00)\x12\xe9\x00\x00\x00\x00N)\x01\xda\x06events)\x01\xda\x11EditBannedRequest)\x01\xda\x10ChatBannedRights)\x01\xda\x08is_admin)\x01\xda\x08register)\x02\xda\x08CMD_HELP\xda\x03botT)\x03Z\nuntil_dateZ\rview_messagesZ\rsend_messages)\x03Z\x08incomingZ\x0edisable_editedZ\x0edisable_errorsc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\x12\x01\x00\x00t\x00s\x08d\x00S\x00t\x01|\x00j\x02|\x00j\x03j\x04\x83\x02I\x00d\x00H\x00}\x01|\x01r&d\x00S\x00t\x05|\x00j\x02\x83\x01t\x00k\x07r8d\x00S\x00t\x06\xa0\x07|\x00j\x02|\x00j\x03j\x04\xa1\x02}\x02|\x02sRd\x00S\x00z"|\x00\xa0\x08t\t|\x00j\x02|\x00j\x03j\x04t\n\x83\x03\xa1\x01I\x00d\x00H\x00\x01\x00W\x00np\x04\x00t\x0bk\nr\xe4\x01\x00}\x03\x01\x00zR|\x00j\x08j\x0c|\x00j\x02d\x01\xa0\r|\x00j\x03j\x04t\x05|\x03\x83\x01\xa1\x02|\x00j\x03j\x0ed\x02\x8d\x03I\x00d\x00H\x00}\x04t\x0f\xa0\x10d\x03\xa1\x01I\x00d\x00H\x00\x01\x00|\x04\xa0\x11d\x04\xa1\x01I\x00d\x00H\x00\x01\x00W\x005\x00d\x00}\x03~\x03X\x00Y\x00n*X\x00|\x00j\x08j\x0c|\x00j\x02d\x05\xa0\r|\x00j\x03j\x04\xa1\x01|\x00j\x03j\x0ed\x02\x8d\x03I\x00d\x00H\x00\x01\x00d\x00S\x00)\x06NzU**Automatic AntiFlooder**\n@admin [User](tg://user?id={}) is flooding this chat.\n\n`{}`)\x03Z\x06entity\xda\x07messageZ\x08reply_to\xe9\n\x00\x00\x00z"Sadly u don\'t have admin 
privilegez\x7f**Automatic AntiFlooder**\n[User](tg://user?id={}) has been automatically restricted\nbecause he reached the defined flood limit.)\x12\xda\nCHAT_FLOODr\x05\x00\x00\x00\xda\x07chat_idr\t\x00\x00\x00Z\x07from_id\xda\x03str\xda\x03sqlZ\x0cupdate_floodZ\x06clientr\x03\x00\x00\x00\xda\x14ANTI_FLOOD_WARN_MODE\xda\tExceptionZ\x0csend_message\xda\x06format\xda\x02id\xda\x07asyncio\xda\x05sleep\xda\x04edit)\x05\xda\x05eventZ\x07admin_cZ\nshould_ban\xda\x01eZ\x1ano_admin_privilege_message\xa9\x00r\x18\x00\x00\x00\xda\x00\xda\x01_\x15\x00\x00\x00sH\x00\x00\x00\x00\x03\x04\x01\x04\x01\x16\x01\x04\x01\x04\x01\x0e\x01\x04\x01\x12\x01\x04\x01\x04\x01\x02\x01\x06\x01\x04\x01\x06\x01\x02\xfd\x10\x05\x10\x01\x06\x01\x04\x01\x04\x03\x06\x00\x06\xfd\x02\x04\x06\xfa\x0c\x08\x10\x01\x04\x01\x02\xff\x1c\x03\x06\x01\x04\x01\x04\x02\x06\xfe\x02\x03\x06\xfbr\x1a\x00\x00\x00z\x15^.setflood(?: |$)(.*))\x02Z\x08outgoingZ\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\x82\x00\x00\x00|\x00j\x00r\nd\x00S\x00|\x00j\x01\xa0\x02d\x01\xa1\x01}\x01z0t\x03\xa0\x04|\x00j\x05|\x01\xa1\x02\x01\x00t\x03\xa0\x06\xa1\x00}\x02|\x00\xa0\x07d\x02\xa0\x08|\x01\xa1\x01\xa1\x01I\x00d\x00H\x00\x01\x00W\x00n6\x04\x00t\tk\nr|\x01\x00}\x03\x01\x00z\x18|\x00\xa0\x07t\n|\x03\x83\x01\xa1\x01I\x00d\x00H\x00\x01\x00W\x005\x00d\x00}\x03~\x03X\x00Y\x00n\x02X\x00d\x00S\x00)\x03N\xe9\x01\x00\x00\x00z+Antiflood updated to {} in the current chat)\x0bZ\x08fwd_fromZ\rpattern_match\xda\x05groupr\x0e\x00\x00\x00Z\tset_floodr\x0c\x00\x00\x00\xda\x15__load_flood_settingsr\x15\x00\x00\x00r\x11\x00\x00\x00r\x10\x00\x00\x00r\r\x00\x00\x00)\x04r\x16\x00\x00\x00Z\tinput_strr\x0b\x00\x00\x00r\x17\x00\x00\x00r\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00>\x00\x00\x00s\x12\x00\x00\x00\x00\x02\x06\x01\x04\x01\x0c\x01\x02\x01\x0e\x01\x08\x01\x1a\x01\x10\x01Z\tantifloodz>`.setflood` <count> \nUsage: To setting flood on your 
group.)\x18r\x13\x00\x00\x00Z\x08telethonr\x02\x00\x00\x00Z\x1etelethon.tl.functions.channelsr\x03\x00\x00\x00Z\x11telethon.tl.typesr\x04\x00\x00\x00Z\x13userbot.utils.toolsr\x05\x00\x00\x00Z(userbot.modules.sql_helper.antiflood_sql\xda\x07modulesZ\nsql_helperZ\rantiflood_sqlr\x0e\x00\x00\x00Z\x0euserbot.eventsr\x06\x00\x00\x00Z\x07userbotr\x07\x00\x00\x00r\x08\x00\x00\x00r\x1d\x00\x00\x00r\x0b\x00\x00\x00r\x0f\x00\x00\x00r\x1a\x00\x00\x00\xda\x06updater\x18\x00\x00\x00r\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s*\x00\x00\x00\x08\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x18\x01\x0c\x01\x10\x04\x08\x02\x02\x01\x02\x01\x02\x01\x02\xfd\x06\x07\x0c\x01\n(\n\x01\n\x0c\x04\x01\x02\x01\x02\xfe'))
| 1,189
| 4,685
| 0.776493
| 970
| 4,756
| 3.772165
| 0.279381
| 0.182017
| 0.137743
| 0.114785
| 0.229844
| 0.182564
| 0.166166
| 0.13419
| 0.124898
| 0.10686
| 0
| 0.313509
| 0.010093
| 4,756
| 4
| 4,685
| 1,189
| 0.463679
| 0.010934
| 0
| 0
| 0
| 0.5
| 0.412928
| 0.407187
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
1380cd175759a6d339fbdd582b696d9b7ac53fe6
| 135
|
py
|
Python
|
modules.py
|
gayensouvik1/zmq
|
f0d8f0b0558010446bec0e50835a6ed8e772b7e8
|
[
"MIT"
] | 1
|
2018-05-10T15:12:19.000Z
|
2018-05-10T15:12:19.000Z
|
modules.py
|
gayensouvik1/zmq
|
f0d8f0b0558010446bec0e50835a6ed8e772b7e8
|
[
"MIT"
] | null | null | null |
modules.py
|
gayensouvik1/zmq
|
f0d8f0b0558010446bec0e50835a6ed8e772b7e8
|
[
"MIT"
] | 2
|
2018-10-14T09:33:46.000Z
|
2018-10-14T09:56:16.000Z
|
import numpy as np
import pickle
import json
def arr2str(arr):
    """Serialize *arr* (e.g. a numpy array) with pickle.

    NOTE(review): despite the name, pickle.dumps returns bytes, not str.
    """
    payload = pickle.dumps(arr)
    return payload
def str2arr(arr):
    """Deserialize a pickle byte string back into the original object.

    SECURITY: pickle.loads can execute arbitrary code during
    deserialization -- never call this on untrusted input.
    """
    restored = pickle.loads(arr)
    return restored
| 13.5
| 25
| 0.762963
| 22
| 135
| 4.681818
| 0.590909
| 0.174757
| 0.291262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.148148
| 135
| 9
| 26
| 15
| 0.878261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.428571
| 0.285714
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
13a53ceef4595ab1e3a53fca8c491f3912a27f7d
| 12,270
|
py
|
Python
|
yandex/cloud/cdn/v1/resource_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
yandex/cloud/cdn/v1/resource_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
yandex/cloud/cdn/v1/resource_service_pb2_grpc.py
|
ovandriyanov/python-sdk
|
eec7dc65ef23789388fa46d13087d4a03cdc6e57
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.cdn.v1 import resource_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__pb2
from yandex.cloud.cdn.v1 import resource_service_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class ResourceServiceStub(object):
    """Provider's resources management service.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Generated gRPC client stub: each attribute is a unary-unary
        # callable bound to one RPC of
        # yandex.cloud.cdn.v1.ResourceService.
        self.Get = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/Get',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetResourceRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__pb2.Resource.FromString,
                )
        self.List = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/List',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesResponse.FromString,
                )
        # Create/Update/Delete are long-running and return an Operation.
        self.Create = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/Create',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.CreateResourceRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Update = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/Update',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.UpdateResourceRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Delete = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/Delete',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.DeleteResourceRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.GetProviderCName = channel.unary_unary(
                '/yandex.cloud.cdn.v1.ResourceService/GetProviderCName',
                request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameResponse.FromString,
                )
class ResourceServiceServicer(object):
    """Provider's resources management service.
    """
    # Generated gRPC base servicer: every handler defaults to
    # UNIMPLEMENTED until overridden by a concrete implementation.

    def Get(self, request, context):
        """Get client's CDN resource by resource id.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Lists CDN resources.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a CDN resource in the specified folder.
        Creation may take up to 15 minutes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Updates the specified CDN resource.
        The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource.
        Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a
        [CacheService.Purge] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes client's CDN resource.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetProviderCName(self, request, context):
        """Get Provider's CNAME (edge endpoint) bind to specified folder id.
        Returns UNIMPLEMENTED error, if provider doesn't support CNAME request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ResourceServiceServicer_to_server(servicer, server):
    # Generated registration helper: maps each RPC name to the
    # servicer's handler and registers the bundle with the server.
    rpc_method_handlers = {
            'Get': grpc.unary_unary_rpc_method_handler(
                    servicer.Get,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetResourceRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__pb2.Resource.SerializeToString,
            ),
            'List': grpc.unary_unary_rpc_method_handler(
                    servicer.List,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesResponse.SerializeToString,
            ),
            'Create': grpc.unary_unary_rpc_method_handler(
                    servicer.Create,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.CreateResourceRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Update': grpc.unary_unary_rpc_method_handler(
                    servicer.Update,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.UpdateResourceRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Delete': grpc.unary_unary_rpc_method_handler(
                    servicer.Delete,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.DeleteResourceRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'GetProviderCName': grpc.unary_unary_rpc_method_handler(
                    servicer.GetProviderCName,
                    request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'yandex.cloud.cdn.v1.ResourceService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ResourceService(object):
    """Provider's resources management service.
    """
    # Generated convenience API: each static method performs a one-off
    # unary-unary call via grpc.experimental without a prebuilt stub.

    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/Get',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetResourceRequest.SerializeToString,
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__pb2.Resource.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/List',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesRequest.SerializeToString,
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.ListResourcesResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/Create',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.CreateResourceRequest.SerializeToString,
            yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/Update',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.UpdateResourceRequest.SerializeToString,
            yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/Delete',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.DeleteResourceRequest.SerializeToString,
            yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetProviderCName(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.ResourceService/GetProviderCName',
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameRequest.SerializeToString,
            yandex_dot_cloud_dot_cdn_dot_v1_dot_resource__service__pb2.GetProviderCNameResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 48.884462
| 142
| 0.697718
| 1,284
| 12,270
| 6.228972
| 0.116044
| 0.043886
| 0.068267
| 0.082896
| 0.835084
| 0.831208
| 0.810203
| 0.762191
| 0.721055
| 0.721055
| 0
| 0.009593
| 0.235371
| 12,270
| 250
| 143
| 49.08
| 0.842891
| 0.081826
| 0
| 0.541237
| 1
| 0
| 0.078978
| 0.050463
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072165
| false
| 0
| 0.020619
| 0.030928
| 0.139175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13e7e97306698276c46ca709101ac75cf52d32f1
| 277,540
|
py
|
Python
|
CSZL_Framwork2020/FeatureEnvironment.py
|
BNDKG/CSZL_2020
|
e7a0aca46708bb09a929df77ce431e6400743cf8
|
[
"MIT"
] | null | null | null |
CSZL_Framwork2020/FeatureEnvironment.py
|
BNDKG/CSZL_2020
|
e7a0aca46708bb09a929df77ce431e6400743cf8
|
[
"MIT"
] | null | null | null |
CSZL_Framwork2020/FeatureEnvironment.py
|
BNDKG/CSZL_2020
|
e7a0aca46708bb09a929df77ce431e6400743cf8
|
[
"MIT"
] | null | null | null |
#coding=utf-8
"""Feature-engineering environment: dataset loading and feature pipelines."""
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
# BUG FIX: ``sklearn.externals.joblib`` was deprecated in scikit-learn 0.21
# and removed in 0.23; fall back to the standalone ``joblib`` package so the
# module imports on modern scikit-learn installs.
try:
    from sklearn.externals import joblib
except ImportError:
    import joblib
class FEbase(object):
    """Base class for feature-engineering steps.

    Subclasses override :meth:`core` with the real transformation;
    :meth:`create` runs it once and caches the output CSV next to the
    first input file.
    """

    def __init__(self, **kwargs):
        pass

    def create(self, *DataSetName):
        """Run ``core`` on the named datasets and cache the result as CSV.

        The output path is ``<dir>/<stem>_<ClassName><ext>`` derived from
        ``DataSetName[0]``.  If that file already exists, the (possibly
        stale) cache is reused and ``core`` is not re-run.

        Returns:
            str: path of the (existing or newly written) output CSV.
        """
        filepath, tempfilename = os.path.split(DataSetName[0])
        filename, extension = os.path.splitext(tempfilename)
        bufferstringoutput = filepath + '/' + filename + '_' + self.__class__.__name__ + extension
        # Idiomatic existence check (was ``== False``); behavior unchanged.
        if not os.path.exists(bufferstringoutput):
            df_all = self.core(DataSetName)
            df_all.to_csv(bufferstringoutput)
        return bufferstringoutput

    def core(self, df_all, Data_adj_name=''):
        """Identity transform; subclasses override with real feature logic."""
        return df_all

    def real_FE(self):
        # BUG FIX: original ``def real_FE():`` omitted ``self``, so calling
        # it on an instance raised TypeError; subclasses already define it
        # as ``real_FE(self)``.  Keeps returning 0 as a no-op default.
        return 0
class FEg30eom0110network(FEbase):
    """Feature pipeline, 2020-01-10 "network" variant.

    Original note (translated): this version switched to a 3-day
    prediction horizon.  NOTE(review): depends on the project-local
    ``FEsingle`` helper module, which is not imported in this file's
    visible header -- presumably imported elsewhere; verify.
    """

    def __init__(self):
        pass

    def core(self,DataSetName):
        """Build the training feature dataframe from input CSV paths.

        DataSetName: sequence of CSV paths -- [0] daily bars, [1] adjust
        factors, [2] daily price limits, [4] extra "long" data (index 3 is
        skipped here; other pipelines in this file read moneyflow from it
        -- TODO confirm).  Returns the cleaned feature dataframe.
        """
        # When True, several pct-rank features are bucketed to small ints.
        intflag=True
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag "ST or otherwise special" stocks via the down/up limit ratio.
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude the STAR Market (ts_code starting with 688).
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class by ts_code prefix: 1=30*, 2=60*, 3=00*, 0=other.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price (close * adjustment factor).
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # Cross-sectional pct ranks; shift(1) uses the previous day's value.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        if(intflag):
            df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is not free-floating.
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        if(intflag):
            df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        if(intflag):
            df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Window features from the FEsingle helper (semantics in FEsingle).
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
        df_all,_=FEsingle.HighLowRange(df_all,8,True)
        df_all,_=FEsingle.HighLowRange(df_all,25,True)
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Trading-halt / limit flag: days with pct_chg above 9.4.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish actually high/low priced stocks).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        if(intflag):
            df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        if(intflag):
            df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
        df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
        df_all=FEsingle.PctChgSumRank(df_all,3,True)
        df_all=FEsingle.PctChgSumRank(df_all,6,True)
        df_all=FEsingle.PctChgSumRank(df_all,12,True)
        df_all=FEsingle.AmountChgRank(df_all,12,True)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Express open/high/low as pct change vs. pre_close, then rank.
        dolist=['open','high','low']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            if(intflag):
                df_all[curc]=df_all[curc]*9.9//2
        # Presumably lagged copies (1-3 days back) of selected features --
        # TODO confirm against FEsingle.OldFeaturesRank.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label from the next 5 days (see FEsingle.PredictDaysTrend).
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks with too low a price.
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        #df_all=df_all[df_all['total_mv_rank']>18]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all
class FEg30eom0110onlinew6d(FEbase):
    """Feature pipeline, "online w6d" variant with moneyflow features.

    Original note (translated): this version switched to a 3-day
    prediction horizon.  NOTE(review): depends on the project-local
    ``FEsingle`` module and a ``rollingRankSciPyB`` helper, neither
    imported in this file's visible header -- verify they are in scope.
    """

    def __init__(self):
        pass

    def core(self,DataSetName):
        """Build the training feature dataframe from input CSV paths.

        DataSetName: [0] daily bars, [1] adjust factors, [2] price limits,
        [3] moneyflow, [4] extra "long" data.  Returns the cleaned
        feature dataframe.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Keep only amount columns; collapse buys minus sells to net flows.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        print(df_money_all)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        # 20-day rolling rank positions of the net flows, then shift(1)
        # so each row only sees the previous day's value.
        df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
        df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
        df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag "ST or otherwise special" stocks via the down/up limit ratio.
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude the STAR Market -- disabled in this variant.
        #print(df_all)
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class by ts_code prefix: 1=30*, 2=60*, 3=00*, 0=other.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price (close * adjustment factor).
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional pct ranks; shift(1) uses the previous day's value.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is not free-floating.
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Window features from the FEsingle helper (semantics in FEsingle).
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Trading-halt / limit flag: days with pct_chg above 9.4.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish actually high/low priced stocks).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        #df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Express open/high/low as pct change vs. pre_close, then rank.
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Presumably a 1-day-lagged copy of selected features -- TODO
        # confirm against FEsingle.OldFeaturesRank.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label from the next 5 days (see FEsingle.PredictDaysTrend).
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks with too low a price.
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        #df_all=df_all[df_all['total_mv_rank']>18]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all
class FE_a23(FEbase):
    """Feature pipeline variant "a23" with summed moneyflow features.

    Original note (translated): this version switched to a 3-day
    prediction horizon.  NOTE(review): depends on the project-local
    ``FEsingle`` module and a ``rollingRankSciPyB`` helper, neither
    imported in this file's visible header -- verify they are in scope.
    """

    def __init__(self):
        pass

    def core(self,DataSetName):
        """Build the training feature dataframe from input CSV paths.

        DataSetName: [0] daily bars, [1] adjust factors, [2] price limits,
        [3] moneyflow, [4] extra "long" data.  Returns the cleaned
        feature dataframe.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Keep only amount columns; collapse buys minus sells to net flows.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Summed net-flow features over 5/12/25-day windows (see FEsingle).
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        print(df_money_all)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag "ST or otherwise special" stocks via the down/up limit ratio.
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        # Day-of-week feature (0=Monday .. 6=Sunday) from trade_date.
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude the STAR Market (ts_code starting with 688).
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class by ts_code prefix: 1=30*, 2=60*, 3=00*, 0=other.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price (close * adjustment factor).
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional pct ranks; shift(1) uses the previous day's value.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is not free-floating.
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Window features from the FEsingle helper (semantics in FEsingle).
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Trading-halt / limit flag: days with pct_chg above 9.4.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish actually high/low priced stocks).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Express open/high/low as pct change vs. pre_close, then rank.
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Presumably lagged copies of selected features -- TODO confirm
        # against FEsingle.OldFeaturesRank.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label from the next 5 days (see FEsingle.PredictDaysTrend).
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks with too low a price (threshold 2 here, not 3 as in
        # the other pipelines in this file).
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        #df_all=df_all[df_all['total_mv_rank']>18]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build today's inference features from the ``real_*.csv`` files.

        Reads real_now.csv, real_adj_now.csv, real_moneyflow_now.csv and
        real_long_now.csv from the working directory, reproduces (most of)
        the training-time feature pipeline, keeps only the latest
        trade_date, and writes the result to ``today_train.csv``.
        Returns None (side effect only).

        Original note (translated): intended version for the new model.
        """
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # Keep only amount columns; collapse buys minus sells to net flows.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # 20-day rolling rank positions of net flows, shifted one day.
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left joins here (inner at train time) to keep every current stock.
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Original note (translated): question mark here -- unresolved.
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        # ts_code may have been parsed as int; restore the zero-padded
        # 6-character string form (original comments translated).
        df_all['ts_code'] = df_all['ts_code'].astype('str')
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)
        print(df_all)
        ## Exclude the STAR Market (ts_code starting with 688).
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class by ts_code prefix: 1=30*, 2=60*, 3=00*, 0=other.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price: yesterday's adjusted close scaled by today's pct
        # change (today's adj_factor may be missing, hence fillna(0)).
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional pct ranks; shift(1) uses the previous day's value.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # Window features from the FEsingle helper (semantics in FEsingle).
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        #===================================================================================================================================#
        # Trading-halt / limit flag: pct_chg above 9.4 or in (4.8, 5.2).
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        # Express open/high/low as pct change vs. pre_close, then rank.
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too low a price / turnover.
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the most recent trading day for inference.
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        # NOTE(review): leftover debug assignment; serves no purpose.
        dwdw=1
class FE_a29(FEbase):
    """Feature-engineering pipeline variant a29.

    Builds per-stock daily features from price, adjustment-factor,
    limit-price, moneyflow and "long" datasets, for training (`core`)
    and for live inference on the latest trading day (`real_FE`).

    NOTE(review): the original comment below says "3-day prediction",
    but `core` calls FEsingle.PredictDaysTrend(df_all, 5) — confirm
    which horizon is intended.
    """
    # This version changes to a 3-day prediction (see NOTE above).
    def __init__(self):
        # No state; all work happens in core()/real_FE().
        pass

    def core(self, DataSetName):
        """Build the training feature set.

        Parameters
        ----------
        DataSetName : sequence of 5 CSV paths, in order:
            [0] daily price data, [1] adjustment factors, [2] limit
            prices, [3] moneyflow, [4] "long" features.

        Returns
        -------
        pandas.DataFrame with engineered features (index reset),
        labeled via FEsingle.PredictDaysTrend.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Drop raw volume columns; only amount (value) columns are used.
        # NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this
        # drop list — harmless to pandas, but likely a copy-paste slip.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        # Net small-order and large-order moneyflow per day.
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling moneyflow sums over 5/12/25-day windows for each flow type.
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner joins: keep only (ts_code, trade_date) rows present in all inputs.
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # down/up limit ratio: ST stocks have a narrower (5%) band, so the
        # ratio falls in a distinctive range — used to flag them below.
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST (special treatment) or otherwise abnormal stocks.
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        # Day-of-week (0=Monday) from YYYYMMDD trade_date.
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) stocks.
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class: 1=ChiNext (30x), 2=Shanghai main (60x), 3=Shenzhen main (00x).
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price (close * adjustment factor).
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # Rank of today's adjusted price within each stock's trailing 20-day window.
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted by one day to avoid lookahead.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        # Bucket the percentile into integer bins 0..19.
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market value that is NOT in free float, ranked cross-sectionally.
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close-vs-rolling-extreme and range features over 5/12/25-day windows.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        # Differences between window lengths capture trend of the rankings.
        df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag: daily gain above 9.4% treated as hitting the limit.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish actual price level) — disabled.
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day return features.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-day cumulative return ranks and sums (3/6/12/24 days).
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three ratio columns
        # (open/high/low expressed as % change vs pre_close).
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Lagged copies of selected features (1- and 2-day history).
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label generation: forward trend over the next 5 days.
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks with too-low price.
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        #df_all=df_all[df_all['total_mv_rank']>15]
        #df_all=df_all[df_all['total_mv_rank']>2]
        # Drop illiquid rows (low turnover amount).
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now: exclude limit-up days and keep only the
        # st_or_otherwrong==1 group, then drop the filter columns themselves.
        # NOTE(review): keeping st_or_otherwrong==1 selects the flagged
        # (ST-range) stocks — confirm this filter direction is intended.
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        #df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build inference features for the latest trading day.

        Reads 'real_now.csv', 'real_adj_now.csv', 'real_moneyflow_now.csv'
        and 'real_long_now.csv' from the working directory, engineers a
        reduced feature set (no labels), keeps only the most recent
        trade_date, and writes the result to 'today_train.csv'.

        NOTE(review): this variant differs from core(): left joins instead
        of inner, 8/25-day windows instead of 5/12/25, no 24-day features,
        and no ST filter — presumed intentional divergence; confirm.
        """
        # Scheduled version for the new model.
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # Same moneyflow column pruning as core() (duplicate labels included).
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # 20-day rolling rank of each moneyflow series, per stock, then
        # shifted one day so today's row only sees yesterday's information.
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left joins keep every price row even when auxiliary data is missing.
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark in the original here (unresolved review note).
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # must use the .str accessor; left-pad codes to 6 digits
        print(df_all)
        ## Exclude STAR Market (688xxx) stocks.
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class: 1=ChiNext (30x), 2=Shanghai main (60x), 3=Shenzhen main (00x).
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted price: yesterday's adjusted close carried forward by
        # today's pct_chg (adj_factor may be missing live, hence fillna(0)).
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted one day (no lookahead).
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # 8/25-day window features (inference variant; core() uses 5/12/25).
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        #===================================================================================================================================#
        # Limit flag: near limit-up, or in the 4.8–5.2% band (ST 5% limit).
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day return features.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three ratio columns
        # (open/high/low expressed as % change vs pre_close).
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop low-priced / low-turnover stocks.
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the latest trading day for live prediction.
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        # Dummy statement retained from the original (debug breakpoint anchor?).
        dwdw=1
class FE_a29_Volatility(FEbase):
#这个版本变为3天预测
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#是否st或其他
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##排除科创版
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#复权后价格
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###真实价格范围(区分实际股价高低)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#删除股价过低的票
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#新模型预定版本
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#这里打一个问号
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #将原本的int数据类型转换为文本
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #用的时候必须加上.str前缀
print(df_all)
##排除科创版
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#复权后价格
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#删除市值过低的票
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
    """Feature-engineering pipeline, version a31.

    core() builds a training dataset from five CSV inputs (daily bars,
    adjustment factors, limit prices, money flow, long-horizon data) and
    labels it with FEsingle.PredictDaysTrend over a 5-day horizon.
    real_FE() rebuilds the same style of features from the "real_*_now"
    CSVs for the latest trade date only and writes 'today_train.csv'.

    NOTE(review): the original comment said "this version switches to a
    3-day prediction", but PredictDaysTrend is called with 5 — confirm
    which horizon is intended.
    """

    def __init__(self):
        # Stateless; all work happens in core()/real_FE().
        pass

    def core(self, DataSetName):
        """Build the labeled training DataFrame.

        Parameters
        ----------
        DataSetName : sequence of 5 CSV paths, in order:
            [0] daily bars, [1] adjustment factors, [2] limit prices,
            [3] money flow, [4] long-horizon data.

        Returns
        -------
        pandas.DataFrame with engineered features, filtered and re-indexed.
        """
        df_data = pd.read_csv(DataSetName[0], index_col=0, header=0)
        df_adj_all = pd.read_csv(DataSetName[1], index_col=0, header=0)
        df_limit_all = pd.read_csv(DataSetName[2], index_col=0, header=0)
        df_money_all = pd.read_csv(DataSetName[3], index_col=0, header=0)
        df_long_all = pd.read_csv(DataSetName[4], index_col=0, header=0)
        # Keep only amount columns from money flow; drop all volume columns.
        df_money_all.drop(['buy_sm_vol', 'sell_sm_vol', 'buy_md_vol', 'sell_md_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_md_vol', 'sell_md_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol', 'sell_elg_amount', 'net_mf_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_md_amount', 'sell_md_amount'], axis=1, inplace=True)
        # Net small-order and large-order amounts (buy minus sell).
        df_money_all['sm_amount'] = df_money_all['buy_sm_amount'] - df_money_all['sell_sm_amount']
        df_money_all['lg_amount'] = df_money_all['buy_lg_amount'] - df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount', 'sell_sm_amount'], axis=1, inplace=True)
        df_money_all.drop(['buy_lg_amount', 'sell_lg_amount'], axis=1, inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling change-sums of money-flow amounts over 5/12/25 trade days.
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'net_mf_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'net_mf_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner-join all sources on (ts_code, trade_date): rows must exist in every input.
        df_all = pd.merge(df_data, df_adj_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_limit_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_money_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_long_all, how='inner', on=['ts_code', 'trade_date'])
        df_all.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)
        df_all['limit_percent'] = df_all['down_limit'] / df_all['up_limit']
        # Flag ST or otherwise abnormal stocks via the down/up limit ratio
        # (a ~5% band instead of ~10% presumably indicates ST — TODO confirm).
        df_all['st_or_otherwrong'] = 0
        df_all.loc[(df_all['limit_percent'] < 0.85) & (0.58 < df_all['limit_percent']), 'st_or_otherwrong'] = 1
        df_all.drop(['up_limit', 'down_limit', 'limit_percent'], axis=1, inplace=True)
        # Day-of-week feature (0=Monday .. 6=Sunday).
        df_all['dayofweek'] = pd.to_datetime(df_all['trade_date'], format='%Y%m%d')
        df_all['dayofweek'] = df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) stocks.
        #print(df_all)
        df_all = df_all[df_all['ts_code'].str.startswith('688') == False]
        # Market-class feature from code prefix: 1=ChiNext(30), 2=SSE main(60), 3=SZSE main(00).
        df_all['class1'] = 0
        df_all.loc[df_all['ts_code'].str.startswith('30') == True, 'class1'] = 1
        df_all.loc[df_all['ts_code'].str.startswith('60') == True, 'class1'] = 2
        df_all.loc[df_all['ts_code'].str.startswith('00') == True, 'class1'] = 3
        #===================================================================================================================================#
        # Adjusted (post-split/dividend) price.
        df_all['real_price'] = df_all['close'] * df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # Per-stock 20-day rolling rank of the adjusted price.
        df_all['real_price_pos'] = df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        # Cross-sectional ranks, shifted by one day to avoid look-ahead.
        df_all['total_mv_rank'] = df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank'] = df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        # Bucket the percentile rank into integer bins 0..19.
        df_all['total_mv_rank'] = df_all['total_mv_rank'] * 19.9 // 1
        df_all['pb_rank'] = df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank'] = df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is NOT in free float, then ranked and shifted.
        df_all['circ_mv_pct'] = (df_all['total_mv'] - df_all['circ_mv']) / df_all['total_mv']
        df_all['circ_mv_pct'] = df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct'] = df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm'] = df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm'] = df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close position relative to rolling highs/lows over 5/12/25 days.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'max')
        df_all, _ = FEsingle.HighLowRange(df_all, 5)
        df_all, _ = FEsingle.HighLowRange(df_all, 12)
        df_all, _ = FEsingle.HighLowRange(df_all, 25)
        # Differences between horizons capture the trend of the rank features.
        df_all['25_pct_rank_min_diff'] = df_all['25_pct_rank_min'] - df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff'] = df_all['12_pct_rank_min'] - df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff'] = df_all['25_pct_rank_max'] - df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff'] = df_all['12_pct_rank_max'] - df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff'] = df_all['25_pct_Rangerank'] - df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff'] = df_all['12_pct_Rangerank'] - df_all['5_pct_Rangerank']
        df_all.drop(['change', 'vol'], axis=1, inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag: pct_chg > 9.4 presumably marks a 10%-limit board — TODO confirm.
        df_all['high_stop'] = 0
        df_all.loc[df_all['pct_chg'] > 9.4, 'high_stop'] = 1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (to distinguish actual price levels).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day change features.
        df_all['chg_rank'] = df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs'] = df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank'] = df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-day change sums and their cross-sectional ranks (3/6/12/24 days).
        df_all = FEsingle.PctChgAbsSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 3)
        df_all = FEsingle.PctChgSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 12)
        df_all = FEsingle.PctChgSumRank(df_all, 24)
        df_all = FEsingle.PctChgSum(df_all, 3)
        df_all = FEsingle.PctChgSum(df_all, 6)
        df_all = FEsingle.PctChgSum(df_all, 12)
        df_all = FEsingle.PctChgSum(df_all, 24)
        df_all['chg_rank_24_diff'] = df_all['chg_rank_24'] - df_all['chg_rank_12']
        df_all['chg_rank_12_diff'] = df_all['chg_rank_12'] - df_all['chg_rank_6']
        df_all['chg_rank_6_diff'] = df_all['chg_rank_6'] - df_all['chg_rank_3']
        df_all['pct_chg_24_diff'] = df_all['pct_chg_24'] - df_all['pct_chg_12']
        df_all['pct_chg_12_diff'] = df_all['pct_chg_12'] - df_all['pct_chg_6']
        df_all['pct_chg_6_diff'] = df_all['pct_chg_6'] - df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Rank the three intraday price ratios (open/high/low vs. pre_close).
        dolist = ['open', 'high', 'low']
        df_all['pct_chg_r'] = df_all['pct_chg']
        for curc in dolist:
            # Percent change of the column vs. previous close, then cross-sectional rank.
            buffer = ((df_all[curc] - df_all['pre_close']) * 100) / df_all['pre_close']
            df_all[curc] = buffer
            df_all[curc] = df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Lagged copies of selected features (1- and 2-day-old values).
        df_all = FEsingle.OldFeaturesRank(df_all, ['open', 'high', 'low', 'pct_chg_r', 'real_price_pos'], 1)
        df_all = FEsingle.OldFeaturesRank(df_all, ['sm_amount', 'lg_amount', 'net_mf_amount'], 1)
        df_all = FEsingle.OldFeaturesRank(df_all, ['sm_amount', 'lg_amount', 'net_mf_amount'], 2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close', 'adj_factor', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        # Attach the 5-day forward trend label.
        df_all = FEsingle.PredictDaysTrend(df_all, 5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'], axis=1, inplace=True)
        # Drop stocks whose price is too low.
        df_all = df_all[df_all['close'] > 2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        # Keep only the smallest market-cap buckets (rank < 6 of 0..19).
        df_all = df_all[df_all['total_mv_rank'] < 6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        # Minimum liquidity filter on traded amount.
        df_all = df_all[df_all['amount'] > 15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        df_all = df_all[df_all['high_stop'] == 0]
        # NOTE(review): keeps only rows flagged st_or_otherwrong==1 — verify this
        # inclusion (rather than exclusion) is intentional.
        df_all = df_all[df_all['st_or_otherwrong'] == 1]
        #'tomorrow_chg'
        df_all.drop(['high_stop', 'amount', 'close', 'real_price'], axis=1, inplace=True)
        df_all.drop(['st_or_otherwrong'], axis=1, inplace=True)
        #df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all = df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build features for the latest trade date from the 'real_*_now'
        CSVs and write them to 'today_train.csv' for live prediction.

        Side effects: reads real_now.csv, real_adj_now.csv,
        real_moneyflow_now.csv, real_long_now.csv; writes today_train.csv;
        prints intermediate frames. Returns None.
        """
        # Scheduled version for the new model.
        df_data = pd.read_csv('real_now.csv', index_col=0, header=0)
        df_adj_all = pd.read_csv('real_adj_now.csv', index_col=0, header=0)
        df_money_all = pd.read_csv('real_moneyflow_now.csv', index_col=0, header=0)
        df_long_all = pd.read_csv('real_long_now.csv', index_col=0, header=0)
        # Same money-flow preprocessing as core(): keep net amounts only.
        df_money_all.drop(['buy_sm_vol', 'sell_sm_vol', 'buy_md_vol', 'sell_md_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_md_vol', 'sell_md_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol', 'sell_elg_amount', 'net_mf_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_md_amount', 'sell_md_amount'], axis=1, inplace=True)
        df_money_all['sm_amount'] = df_money_all['buy_sm_amount'] - df_money_all['sell_sm_amount']
        df_money_all['lg_amount'] = df_money_all['buy_lg_amount'] - df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount', 'sell_sm_amount'], axis=1, inplace=True)
        df_money_all.drop(['buy_lg_amount', 'sell_lg_amount'], axis=1, inplace=True)
        # 20-day rolling rank of money-flow amounts per stock, then shift by
        # one day so the live row only sees yesterday's values.
        df_money_all['sm_amount_pos'] = df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['lg_amount_pos'] = df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['net_mf_amount_pos'] = df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['sm_amount_pos'] = df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos'] = df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos'] = df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount'] = df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount'] = df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount'] = df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left-join (unlike core's inner join): keep every daily-bar row.
        df_all = pd.merge(df_data, df_adj_all, how='left', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_money_all, how='left', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_long_all, how='left', on=['ts_code', 'trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)
        # Question mark here (original author was unsure about this section).
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str')  # convert original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)  # the .str accessor is required here
        print(df_all)
        ## Exclude STAR Market (688xxx) stocks.
        #print(df_all)
        df_all[["ts_code"]] = df_all[["ts_code"]].astype(str)
        df_all = df_all[df_all['ts_code'].str.startswith('688') == False]
        # Market-class feature from code prefix (same scheme as core()).
        df_all['class1'] = 0
        df_all.loc[df_all['ts_code'].str.startswith('30') == True, 'class1'] = 1
        df_all.loc[df_all['ts_code'].str.startswith('60') == True, 'class1'] = 2
        df_all.loc[df_all['ts_code'].str.startswith('00') == True, 'class1'] = 3
        #===================================================================================================================================#
        # Adjusted price, reconstructed for "today": yesterday's adjusted close
        # scaled by today's pct_chg (adj_factor may be missing live → fillna(0)).
        df_all['adj_factor'] = df_all['adj_factor'].fillna(0)
        df_all['real_price'] = df_all['close'] * df_all['adj_factor']
        df_all['real_price'] = df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price'] = df_all['real_price'] * (1 + df_all['pct_chg'] / 100)
        #===================================================================================================================================#
        df_all['real_price_pos'] = df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        # Cross-sectional ranks, shifted one day to avoid look-ahead.
        df_all['total_mv_rank'] = df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank'] = df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank'] = df_all['total_mv_rank'] * 19.9 // 1
        df_all['pb_rank'] = df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank'] = df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct'] = (df_all['total_mv'] - df_all['circ_mv']) / df_all['total_mv']
        df_all['circ_mv_pct'] = df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct'] = df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm'] = df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm'] = df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # NOTE(review): live path uses 8/25-day windows while core() uses
        # 5/12/25 — confirm the trained model expects these horizons.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 8, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 8, 'max')
        df_all, _ = FEsingle.HighLowRange(df_all, 8)
        df_all, _ = FEsingle.HighLowRange(df_all, 25)
        #===================================================================================================================================#
        # Limit-up flag; the 4.8–5.2 band presumably catches 5%-limit (ST)
        # boards — TODO confirm.
        df_all['high_stop'] = 0
        df_all.loc[df_all['pct_chg'] > 9.4, 'high_stop'] = 1
        df_all.loc[(df_all['pct_chg'] < 5.2) & (4.8 < df_all['pct_chg']), 'high_stop'] = 1
        # 1-day change features.
        df_all['chg_rank'] = df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs'] = df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank'] = df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all = FEsingle.PctChgSumRank(df_all, 3)
        df_all = FEsingle.PctChgSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 12)
        df_all = FEsingle.PctChgSum(df_all, 3)
        df_all = FEsingle.PctChgSum(df_all, 6)
        df_all = FEsingle.PctChgSum(df_all, 12)
        df_all = FEsingle.AmountChgRank(df_all, 12)
        # Rank the three intraday price ratios (open/high/low vs. pre_close).
        dolist = ['open', 'high', 'low']
        df_all['pct_chg_r'] = df_all['pct_chg']
        for curc in dolist:
            buffer = ((df_all[curc] - df_all['pre_close']) * 100) / df_all['pre_close']
            df_all[curc] = buffer
            df_all[curc] = df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all = FEsingle.OldFeaturesRank(df_all, ['open', 'high', 'low', 'pct_chg_r', 'pst_amount_rank_12', 'real_price_pos'], 1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too-low price (filters on close, not market cap).
        df_all = df_all[df_all['close'] > 3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all = df_all[df_all['amount'] > 15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close', 'pre_close', 'pct_chg', 'adj_factor', 'real_price', 'amount', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        # Columns not used for now.
        df_all = df_all[df_all['high_stop'] == 0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'], axis=1, inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        # Keep only the latest trade date — this is the live-prediction input.
        month_sec = df_all['trade_date'].max()
        df_all = df_all[df_all['trade_date'] == month_sec]
        print(df_all)
        df_all = df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        dwdw = 1  # NOTE(review): dead assignment, presumably a debugger anchor.
class FE_a31_full(FEbase):
#这个版本变为3天预测
def __init__(self):
pass
    def core(self, DataSetName):
        """Build the labeled training DataFrame (the "_full" variant:
        unlike FE_a31.core it does NOT filter out limit-up days or
        ST-flagged stocks).

        Parameters
        ----------
        DataSetName : sequence of 5 CSV paths, in order:
            [0] daily bars, [1] adjustment factors, [2] limit prices,
            [3] money flow, [4] long-horizon data.

        Returns
        -------
        pandas.DataFrame with engineered features and a 5-day forward
        trend label from FEsingle.PredictDaysTrend.
        """
        df_data = pd.read_csv(DataSetName[0], index_col=0, header=0)
        df_adj_all = pd.read_csv(DataSetName[1], index_col=0, header=0)
        df_limit_all = pd.read_csv(DataSetName[2], index_col=0, header=0)
        df_money_all = pd.read_csv(DataSetName[3], index_col=0, header=0)
        df_long_all = pd.read_csv(DataSetName[4], index_col=0, header=0)
        # Keep only amount columns from money flow; drop all volume columns.
        df_money_all.drop(['buy_sm_vol', 'sell_sm_vol', 'buy_md_vol', 'sell_md_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_md_vol', 'sell_md_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol', 'sell_elg_amount', 'net_mf_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_md_amount', 'sell_md_amount'], axis=1, inplace=True)
        # Net small-order and large-order amounts (buy minus sell).
        df_money_all['sm_amount'] = df_money_all['buy_sm_amount'] - df_money_all['sell_sm_amount']
        df_money_all['lg_amount'] = df_money_all['buy_lg_amount'] - df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount', 'sell_sm_amount'], axis=1, inplace=True)
        df_money_all.drop(['buy_lg_amount', 'sell_lg_amount'], axis=1, inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling change-sums of money-flow amounts over 5/12/25 trade days.
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'net_mf_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 12, 'net_mf_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 25, 'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner-join all sources on (ts_code, trade_date).
        df_all = pd.merge(df_data, df_adj_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_limit_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_money_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_long_all, how='inner', on=['ts_code', 'trade_date'])
        df_all.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)
        # limit_percent is computed but the ST flag is disabled in this variant.
        df_all['limit_percent'] = df_all['down_limit'] / df_all['up_limit']
        # ST-or-otherwise-abnormal flag (disabled in the _full variant).
        #df_all['st_or_otherwrong']=0
        #df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit', 'down_limit', 'limit_percent'], axis=1, inplace=True)
        # Day-of-week feature (0=Monday .. 6=Sunday).
        df_all['dayofweek'] = pd.to_datetime(df_all['trade_date'], format='%Y%m%d')
        df_all['dayofweek'] = df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) stocks.
        #print(df_all)
        df_all = df_all[df_all['ts_code'].str.startswith('688') == False]
        # Market-class feature from code prefix: 1=ChiNext(30), 2=SSE main(60), 3=SZSE main(00).
        df_all['class1'] = 0
        df_all.loc[df_all['ts_code'].str.startswith('30') == True, 'class1'] = 1
        df_all.loc[df_all['ts_code'].str.startswith('60') == True, 'class1'] = 2
        df_all.loc[df_all['ts_code'].str.startswith('00') == True, 'class1'] = 3
        #===================================================================================================================================#
        # Adjusted (post-split/dividend) price.
        df_all['real_price'] = df_all['close'] * df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # Per-stock 20-day rolling rank of the adjusted price.
        df_all['real_price_pos'] = df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        # Cross-sectional ranks, shifted by one day to avoid look-ahead;
        # total_mv_rank is then bucketed into integer bins 0..19.
        df_all['total_mv_rank'] = df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank'] = df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank'] = df_all['total_mv_rank'] * 19.9 // 1
        df_all['pb_rank'] = df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank'] = df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is NOT in free float, ranked and shifted.
        df_all['circ_mv_pct'] = (df_all['total_mv'] - df_all['circ_mv']) / df_all['total_mv']
        df_all['circ_mv_pct'] = df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct'] = df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm'] = df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm'] = df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close position relative to rolling highs/lows over 5/12/25 days.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'max')
        df_all, _ = FEsingle.HighLowRange(df_all, 5)
        df_all, _ = FEsingle.HighLowRange(df_all, 12)
        df_all, _ = FEsingle.HighLowRange(df_all, 25)
        # Differences between horizons capture the trend of the rank features.
        df_all['25_pct_rank_min_diff'] = df_all['25_pct_rank_min'] - df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff'] = df_all['12_pct_rank_min'] - df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff'] = df_all['25_pct_rank_max'] - df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff'] = df_all['12_pct_rank_max'] - df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff'] = df_all['25_pct_Rangerank'] - df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff'] = df_all['12_pct_Rangerank'] - df_all['5_pct_Rangerank']
        df_all.drop(['change', 'vol'], axis=1, inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (disabled in the _full variant: limit-up days are kept).
        #df_all['high_stop']=0
        #df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (to distinguish actual price levels).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day change features.
        df_all['chg_rank'] = df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs'] = df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank'] = df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-day change sums and their cross-sectional ranks (3/6/12/24 days).
        df_all = FEsingle.PctChgAbsSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 3)
        df_all = FEsingle.PctChgSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 12)
        df_all = FEsingle.PctChgSumRank(df_all, 24)
        df_all = FEsingle.PctChgSum(df_all, 3)
        df_all = FEsingle.PctChgSum(df_all, 6)
        df_all = FEsingle.PctChgSum(df_all, 12)
        df_all = FEsingle.PctChgSum(df_all, 24)
        df_all['chg_rank_24_diff'] = df_all['chg_rank_24'] - df_all['chg_rank_12']
        df_all['chg_rank_12_diff'] = df_all['chg_rank_12'] - df_all['chg_rank_6']
        df_all['chg_rank_6_diff'] = df_all['chg_rank_6'] - df_all['chg_rank_3']
        df_all['pct_chg_24_diff'] = df_all['pct_chg_24'] - df_all['pct_chg_12']
        df_all['pct_chg_12_diff'] = df_all['pct_chg_12'] - df_all['pct_chg_6']
        df_all['pct_chg_6_diff'] = df_all['pct_chg_6'] - df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Rank the three intraday price ratios (open/high/low vs. pre_close).
        dolist = ['open', 'high', 'low']
        df_all['pct_chg_r'] = df_all['pct_chg']
        for curc in dolist:
            # Percent change of the column vs. previous close, then cross-sectional rank.
            buffer = ((df_all[curc] - df_all['pre_close']) * 100) / df_all['pre_close']
            df_all[curc] = buffer
            df_all[curc] = df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Lagged copies of selected features (1- and 2-day-old values).
        df_all = FEsingle.OldFeaturesRank(df_all, ['open', 'high', 'low', 'pct_chg_r', 'real_price_pos'], 1)
        df_all = FEsingle.OldFeaturesRank(df_all, ['sm_amount', 'lg_amount', 'net_mf_amount'], 1)
        df_all = FEsingle.OldFeaturesRank(df_all, ['sm_amount', 'lg_amount', 'net_mf_amount'], 2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close', 'adj_factor', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        # Attach the 5-day forward trend label.
        df_all = FEsingle.PredictDaysTrend(df_all, 5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'], axis=1, inplace=True)
        # Drop stocks whose price is too low.
        df_all = df_all[df_all['close'] > 2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        # Keep only the smallest market-cap buckets (rank < 6 of 0..19).
        df_all = df_all[df_all['total_mv_rank'] < 6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        # Minimum liquidity filter on traded amount.
        df_all = df_all[df_all['amount'] > 15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now (high_stop / ST filters disabled here).
        #df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['amount', 'close', 'real_price'], axis=1, inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        print(df_all)
        df_all = df_all.reset_index(drop=True)
        return df_all
def real_FE(self):
# Live ("real") version of the new-model feature pipeline.
# Loads today's raw quote / adjust-factor / moneyflow / long-format CSVs,
# engineers the same features as core(), keeps only the most recent
# trade_date, and writes the rows to today_train.csv for prediction.
# No value is returned; the CSV file is the output.
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
# Keep only the amount (value) columns of the moneyflow data; raw volume
# columns are discarded.
# NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list.
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
# Net small-order / large-order amounts = buys minus sells.
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
# 20-day rolling rank position of each money-flow series, per stock
# (rollingRankSciPyB is a project helper — presumably ranks the last value
# within the window; confirm against its definition).
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
# Shift all money-flow features by one day per stock so only prior-day
# information is used as input.
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
# Left-merge keeps every quote row even when auxiliary data is missing
# (core() uses inner merges instead).
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (original author was unsure about this step).
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required for zero-padding
print(df_all)
## Exclude STAR Market (codes starting with 688).
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
# Board class: 0 = other, 1 = ChiNext (30*), 2 = Shanghai main (60*),
# 3 = Shenzhen main (00*).
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted ("real") price: yesterday's adjusted close scaled by today's
# pct_chg (adj_factor for today may be missing in live data, hence the
# fillna(0) + shift reconstruction).
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
# Cross-sectional percentile ranks, shifted one day per stock; total_mv_rank
# is then bucketed into 20 integer bins (0..19).
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
# Windowed close-vs-high/low position and range features (8- and 25-day).
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag: pct_chg above 9.4, or in the (4.8, 5.2) band
# (presumably 5%-limit boards such as ST stocks — confirm).
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day cross-sectional ranks.
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
# Multi-day momentum features (3/6/12-day sums and their ranks).
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute cross-sectional ranks of the three intraday price ratios
# (open/high/low vs. pre_close, in percent).
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced / thinly-traded stocks.
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
# Remove raw columns that must not leak into the model input.
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Temporarily unused columns below; also exclude limit-up rows (cannot be bought).
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
# Keep only the latest trading day for live prediction.
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
# NOTE(review): dead assignment, likely a debugging breakpoint anchor.
dwdw=1
# Feature-engineering variant "a29_full". Same pipeline family as the
# preceding FE classes; the "full" variant keeps the whole universe
# (the total_mv_rank filter in core() is commented out).
class FE_a29_full(FEbase):
# Original note: "this version switches to 3-day prediction" — but core()
# calls FEsingle.PredictDaysTrend(df_all,5); TODO confirm which is intended.
def __init__(self):
# Stateless; all work happens in core()/real_FE().
pass
def core(self,DataSetName):
# Training-time feature pipeline for FE_a29_full.
# DataSetName: sequence of 5 CSV paths — [0] daily quotes, [1] adjust
# factors, [2] up/down price limits, [3] moneyflow, [4] long-format extras.
# Returns the engineered DataFrame with the label columns added by
# FEsingle.PredictDaysTrend (5-day horizon).
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
# Keep only the amount (value) columns of the moneyflow data.
# NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list.
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
# Net small-order / large-order amounts = buys minus sells.
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
# Cumulative money-flow sums over 5/12/25-day windows for each series.
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
# Inner merges: only rows present in every input survive (unlike the
# left merges used in real_FE).
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# ST-or-otherwise-problematic flag (currently disabled).
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
# Day-of-week feature (0=Monday .. 6=Sunday) from the yyyymmdd trade_date.
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (codes starting with 688).
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
# Board class: 0 = other, 1 = ChiNext (30*), 2 = Shanghai main (60*),
# 3 = Shenzhen main (00*).
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price = close * adjust factor.
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
# Cross-sectional percentile ranks, shifted one day per stock; total_mv_rank
# is bucketed into 20 integer bins (0..19).
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
# Windowed close-vs-high/low position and range features (5/12/25-day),
# plus their cross-window differences.
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag (disabled in this variant).
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (distinguishes actual high/low share price) — disabled.
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day cross-sectional ranks.
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
# Multi-day momentum features (3/6/12/24-day) and their differences.
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks of the three intraday price ratios
# (open/high/low vs. pre_close, in percent).
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
# Attach the 5-day-ahead trend label.
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks with too-low price / turnover ("full" variant: no
# total_mv_rank cap, unlike the sibling class).
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Temporarily unused columns below.
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Live ("real") feature pipeline for FE_a29_full.
# Loads today's raw quote / adjust-factor / moneyflow / long-format CSVs,
# engineers prediction-time features, keeps only the latest trade_date and
# writes the rows to today_train.csv. No value is returned.
# NOTE(review): unlike core(), this uses the rolling-rank money-flow
# features rather than InputChgSum — verify train/serve feature parity.
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
# Keep only the amount (value) columns of the moneyflow data.
# NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list.
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
# Net small-order / large-order amounts = buys minus sells.
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
# 20-day rolling rank position per stock, then a one-day shift so only
# prior-day information enters the features.
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
# Left merges keep every quote row even with missing auxiliary data.
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (original author was unsure about this step).
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required for zero-padding
print(df_all)
## Exclude STAR Market (codes starting with 688).
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
# Board class: 0 = other, 1 = ChiNext (30*), 2 = Shanghai main (60*),
# 3 = Shenzhen main (00*).
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted ("real") price reconstructed from yesterday's adjusted close
# scaled by today's pct_chg (today's adj_factor may be missing live).
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
# Cross-sectional percentile ranks, shifted one day per stock; total_mv_rank
# is bucketed into 20 integer bins (0..19).
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
# Windowed close-vs-high/low position and range features (8- and 25-day).
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag: pct_chg above 9.4, or in the (4.8, 5.2) band
# (presumably 5%-limit boards such as ST stocks — confirm).
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day cross-sectional ranks.
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
# Multi-day momentum features (3/6/12-day sums and ranks).
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute cross-sectional ranks of the three intraday price ratios
# (open/high/low vs. pre_close, in percent).
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced / thinly-traded stocks.
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
# Remove raw columns that must not leak into the model input.
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Temporarily unused columns below; exclude limit-up rows (cannot be bought).
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
# Keep only the latest trading day for live prediction.
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
# NOTE(review): dead assignment, likely a debugging breakpoint anchor.
dwdw=1
# Feature-engineering variant "qliba2": builds the trend label with
# FEsingle.PredictDaysTrend and joins precomputed qlib features from
# 13to21_first70plus.csv instead of hand-built indicators (see core()).
class FE_qliba2(FEbase):
# Original note: "this version switches to 3-day prediction" — but core()
# calls FEsingle.PredictDaysTrend(df_all,5); TODO confirm which is intended.
def __init__(self):
# Stateless; all work happens in core()/real_FE().
pass
def core(self,DataSetName):
# Training pipeline for FE_qliba2: compute the 5-day trend label from raw
# quotes, then left-join precomputed qlib features.
# DataSetName: sequence of CSV paths — [0] daily quotes, [1] adjust factors.
# Returns a DataFrame of (ts_code, trade_date, labels, qlib features).
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
# Adjusted price = close * adjust factor.
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
# Keep only the key columns plus the labels produced above.
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
# Fetch qlib features (the assembly of the combined CSV below is kept as a
# commented-out record of how 13to21_first70plus.csv was produced).
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
# Map qlib column names onto this project's schema.
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
# Normalize trade_date from 'YYYY-MM-DD' strings to int YYYYMMDD keys.
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
# Reformat ts_code from qlib style to tushare style, e.g.
# 'SH600000' -> 'SH' + '600000' -> '600000.SH'.
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
# Fill missing qlib values with 0 before joining; rows that still fail to
# match any qlib row are removed by the dropna below.
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#新模型预定版本
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#这里打一个问号
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #将原本的int数据类型转换为文本
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #用的时候必须加上.str前缀
print(df_all)
##排除科创版
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#复权后价格
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#删除市值过低的票
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEonlinew_a31(FEbase):
    """Feature-engineering pipeline, variant ``a31``.

    ``core`` builds the training feature table from five historical CSV
    datasets and labels it with ``FEsingle.PredictDaysTrend(df, 5)`` (the
    original author's note said "3-day prediction", but the code uses a
    5-day horizon — kept as-is).  ``real_FE`` rebuilds the same features
    from the ``real_*_now.csv`` snapshots, keeps only the most recent
    trading day and writes it to ``today_train.csv`` for live inference.
    """

    def __init__(self):
        pass

    def core(self,DataSetName):
        """Build the training feature set.

        Parameters:
            DataSetName: sequence of five CSV paths, in order: daily bars,
                adjustment factors, price limits, money flow, long-format
                extras.  Each is read with ``index_col=0, header=0``.

        Returns:
            pandas.DataFrame of engineered features plus the target column
            appended by ``FEsingle.PredictDaysTrend``.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Keep only the buy/sell *amount* columns that feed the features below.
        # (The original drop list named 'buy_md_vol'/'sell_md_vol' twice; deduped.)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        # Net small-order and large-order money flow.
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # Shift one day per stock so each row only sees the previous day's flow.
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling money-flow sums over 5/12/25-day windows.
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        print(df_money_all)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Flag ST / otherwise-restricted stocks via their narrower limit band
        # (down_limit/up_limit between 0.58 and 0.85).
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        # Exclude STAR-market (688xxx) stocks.
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board class: 1 = 30x (ChiNext), 2 = 60x (Shanghai), 3 = 00x (Shenzhen).
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        # Back-adjusted price.
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        # Rank of today's adjusted price within the trailing 20-day window.
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted one day to avoid look-ahead.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        # Close-vs-rolling-high/low features over 5/12/25-day windows.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        # BUGFIX: was a duplicate (12,'min') call; the 12-day 'max' feature was
        # never generated here although real_FE() below computes it.
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        df_all.drop(['change','vol'],axis=1,inplace=True)
        # Mark limit-up days (> 9.4%); such rows are excluded from training below.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        # 1-day cross-sectional change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        # Re-express open/high/low as percent change vs previous close, then
        # rank cross-sectionally per trade date.
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
        # Lagged copies of selected features.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label: 5-day forward trend.
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Filter out cheap, illiquid, limit-up and non-flagged rows.
        df_all=df_all[df_all['close']>2]
        df_all=df_all[df_all['amount']>15000]
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build today's feature rows for live prediction.

        Reads the ``real_now.csv`` / ``real_adj_now.csv`` /
        ``real_moneyflow_now.csv`` / ``real_long_now.csv`` snapshots,
        applies the same feature engineering as ``core`` (without the
        prediction label), keeps only the latest ``trade_date`` and writes
        the result to ``today_train.csv``.
        """
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # Same money-flow column reduction as core() (duplicate labels deduped).
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # Left joins here (vs inner in core): keep every live row even when a
        # secondary dataset has no match for it.
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        # Previous-day money flow plus 5/12/25-day rolling sums.
        df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
        df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
        df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
        df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
        df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
        df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
        df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
        df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
        df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
        df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
        df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
        df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Day-of-week feature (trade_date is YYYYMMDD).
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        # ts_code arrives as an integer from the live CSV; restore the
        # zero-padded 6-digit string code before prefix tests.
        df_all['ts_code'] = df_all['ts_code'].astype('str')
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)
        print(df_all)
        # Exclude STAR market and tag the board class, as in core().
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        # Adjusted price: yesterday's adjusted close grown by today's pct_chg
        # (today's close may not be final intraday -- NOTE(review): assumed
        # intent; confirm against the data feed).
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted one day to avoid look-ahead.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        # Close-vs-rolling-high/low features over 5/12/25-day windows.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        # Window-to-window differences of the rolling high/low features.
        df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        # Mark limit-up days; here the near-5% band is also excluded.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day cross-sectional change ranks.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        # Window-to-window differences of the change ranks/sums.
        df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        # Re-express open/high/low as percent change vs previous close, then
        # rank cross-sectionally per trade date.
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
        # Lagged copies of selected features.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        # Filter out cheap / illiquid / large-cap rows (this live variant keeps
        # only the smallest market-cap buckets).
        df_all=df_all[df_all['close']>2]
        df_all=df_all[df_all['amount']>15000]
        df_all=df_all[df_all['total_mv_rank']<6]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        df_all=df_all[df_all['high_stop']==0]
        df_all.drop(['high_stop'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the latest trading day and persist it for inference.
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
class FEfast_a23(FEbase):
#这个版本变为3天预测
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#是否st或其他
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##排除科创版
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#复权后价格
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###真实价格范围(区分实际股价高低)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#删除股价过低的票
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#新模型预定版本
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#这里打一个问号
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #将原本的int数据类型转换为文本
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #用的时候必须加上.str前缀
print(df_all)
##排除科创版
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#复权后价格
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#删除市值过低的票
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23_pos(FEbase):
    """Feature pipeline whose training label is the 20-day forward rolling
    rank of the adjusted price (``tomorrow_chg_rank``); ``real_FE`` builds the
    live inference feature file ``today_train.csv``.
    """

    def __init__(self):
        pass

    def core(self, DataSetName):
        """Build the training feature set.

        Parameters:
            DataSetName: sequence of 5 CSV paths —
                [0] daily bars, [1] adjust factors, [2] daily limit prices,
                [3] moneyflow, [4] daily basics (valuation fields).

        Returns:
            Assembled feature DataFrame, one row per (ts_code, trade_date).
        """
        df_data = pd.read_csv(DataSetName[0], index_col=0, header=0)
        df_adj_all = pd.read_csv(DataSetName[1], index_col=0, header=0)
        df_limit_all = pd.read_csv(DataSetName[2], index_col=0, header=0)
        df_money_all = pd.read_csv(DataSetName[3], index_col=0, header=0)
        df_long_all = pd.read_csv(DataSetName[4], index_col=0, header=0)
        # Reduce moneyflow to net small/large amounts (buy minus sell);
        # everything else is dropped.
        df_money_all.drop(['buy_sm_vol', 'sell_sm_vol', 'buy_md_vol', 'sell_md_vol',
                           'buy_lg_vol', 'sell_lg_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol',
                           'sell_elg_amount', 'net_mf_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_md_amount', 'sell_md_amount'], axis=1, inplace=True)
        df_money_all['sm_amount'] = df_money_all['buy_sm_amount'] - df_money_all['sell_sm_amount']
        df_money_all['lg_amount'] = df_money_all['buy_lg_amount'] - df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount', 'sell_sm_amount'], axis=1, inplace=True)
        df_money_all.drop(['buy_lg_amount', 'sell_lg_amount'], axis=1, inplace=True)
        # Shift per stock by one day so only prior-day flow is visible to the model.
        df_money_all['sm_amount'] = df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount'] = df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount'] = df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # 5-day aggregation of each flow (presumably a rolling sum —
        # see FEsingle.InputChgSum for exact semantics).
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'sm_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'lg_amount')
        df_money_all = FEsingle.InputChgSum(df_money_all, 5, 'net_mf_amount')
        # Inner-join all sources on (ts_code, trade_date).
        df_all = pd.merge(df_data, df_adj_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_limit_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_money_all, how='inner', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_long_all, how='inner', on=['ts_code', 'trade_date'])
        df_all.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)
        # ST-or-otherwise-abnormal flag from the down/up limit ratio.
        # NOTE(review): flag==1 corresponds to a ratio in (0.58, 0.85), which
        # presumably matches the standard +/-10% band — confirm intent, since
        # the later filter keeps flag==1 rows.
        df_all['limit_percent'] = df_all['down_limit'] / df_all['up_limit']
        df_all['st_or_otherwrong'] = 0
        df_all.loc[(df_all['limit_percent'] < 0.85) & (0.58 < df_all['limit_percent']), 'st_or_otherwrong'] = 1
        df_all.drop(['up_limit', 'down_limit', 'limit_percent'], axis=1, inplace=True)
        df_all['dayofweek'] = pd.to_datetime(df_all['trade_date'], format='%Y%m%d')
        df_all['dayofweek'] = df_all['dayofweek'].dt.dayofweek
        # Exclude the STAR board (688*); tag the remaining boards by code prefix.
        df_all = df_all[df_all['ts_code'].str.startswith('688') == False]
        df_all['class1'] = 0
        df_all.loc[df_all['ts_code'].str.startswith('30') == True, 'class1'] = 1
        df_all.loc[df_all['ts_code'].str.startswith('60') == True, 'class1'] = 2
        df_all.loc[df_all['ts_code'].str.startswith('00') == True, 'class1'] = 3
        # Adjusted (forward-factor) price.
        df_all['real_price'] = df_all['close'] * df_all['adj_factor']
        # Label: rolling 20-day rank of the adjusted price, shifted 20 days
        # back so each row gets the rank of its own future window.
        df_all['tomorrow_chg_rank'] = df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPy(x)).reset_index(0, drop=True)
        df_all['tomorrow_chg_rank'] = df_all.groupby('ts_code')['tomorrow_chg_rank'].shift(-20)
        df_all['tomorrow_chg'] = df_all['tomorrow_chg_rank']
        # Cross-sectional valuation ranks, shifted one day to avoid lookahead.
        df_all['pb_rank'] = df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank'] = df_all.groupby('ts_code')['pb_rank'].shift(1)
        df_all['circ_mv_pct'] = (df_all['total_mv'] - df_all['circ_mv']) / df_all['total_mv']
        df_all['circ_mv_pct'] = df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct'] = df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        df_all['ps_ttm'] = df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm'] = df_all.groupby('ts_code')['ps_ttm'].shift(1)
        # Price-position features at 25/12/5-day windows, both extremes.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'max')
        # FIX: this call was a duplicated (12, 'min'), which recomputed the
        # identical feature and left the 12-day 'max' feature missing.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 12, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 5, 'max')
        df_all, _ = FEsingle.HighLowRange(df_all, 5)
        df_all, _ = FEsingle.HighLowRange(df_all, 12)
        df_all, _ = FEsingle.HighLowRange(df_all, 25)
        df_all.drop(['change', 'vol'], axis=1, inplace=True)
        # Limit-up flag (daily change above 9.4%).
        df_all['high_stop'] = 0
        df_all.loc[df_all['pct_chg'] > 9.4, 'high_stop'] = 1
        # 1-day cross-sectional change ranks.
        df_all['chg_rank'] = df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        df_all['pct_chg_abs'] = df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank'] = df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all = FEsingle.PctChgSum(df_all, 3)
        # Express open/high/low as percent moves vs. pre_close, then rank daily.
        dolist = ['open', 'high', 'low']
        df_all['pct_chg_r'] = df_all['pct_chg']
        for curc in dolist:
            buffer = ((df_all[curc] - df_all['pre_close']) * 100) / df_all['pre_close']
            df_all[curc] = buffer
            df_all[curc] = df_all.groupby('trade_date')[curc].rank(pct=True)
        # Lagged copies of selected features (1- and 2-day).
        df_all = FEsingle.OldFeaturesRank(df_all, ['open', 'high', 'low', 'pct_chg_r', 'sm_amount', 'lg_amount', 'net_mf_amount'], 1)
        df_all = FEsingle.OldFeaturesRank(df_all, ['sm_amount', 'lg_amount', 'net_mf_amount'], 2)
        df_all.drop(['pre_close', 'adj_factor', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        df_all.drop(['pct_chg'], axis=1, inplace=True)
        # Sample filters: penny stocks, thin turnover, limit-ups, abnormal names.
        df_all = df_all[df_all['close'] > 2]
        df_all = df_all[df_all['amount'] > 15000]
        df_all = df_all[df_all['high_stop'] == 0]
        df_all = df_all[df_all['st_or_otherwrong'] == 1]
        # Columns not used downstream.
        df_all.drop(['high_stop', 'amount', 'close', 'real_price'], axis=1, inplace=True)
        df_all.drop(['st_or_otherwrong'], axis=1, inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        print(df_all)
        df_all = df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build today's inference feature file from the ``real_*_now.csv``
        snapshots and write it to ``today_train.csv`` (latest trade date only).

        NOTE(review): the feature set built here differs from ``core`` (it
        uses PctChgSumRank/AmountChgRank/pst_amount_rank_12/real_price_pos
        instead of the flow features) — confirm the serving model expects
        this schema.
        """
        df_data = pd.read_csv('real_now.csv', index_col=0, header=0)
        df_adj_all = pd.read_csv('real_adj_now.csv', index_col=0, header=0)
        df_money_all = pd.read_csv('real_moneyflow_now.csv', index_col=0, header=0)
        df_long_all = pd.read_csv('real_long_now.csv', index_col=0, header=0)
        # Reduce moneyflow to net small/large amounts (buy minus sell).
        df_money_all.drop(['buy_sm_vol', 'sell_sm_vol', 'buy_md_vol', 'sell_md_vol',
                           'buy_lg_vol', 'sell_lg_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol',
                           'sell_elg_amount', 'net_mf_vol'], axis=1, inplace=True)
        df_money_all.drop(['buy_md_amount', 'sell_md_amount'], axis=1, inplace=True)
        df_money_all['sm_amount'] = df_money_all['buy_sm_amount'] - df_money_all['sell_sm_amount']
        df_money_all['lg_amount'] = df_money_all['buy_lg_amount'] - df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount', 'sell_sm_amount'], axis=1, inplace=True)
        df_money_all.drop(['buy_lg_amount', 'sell_lg_amount'], axis=1, inplace=True)
        # 20-day rolling-rank position of each flow, then one-day lag.
        df_money_all['sm_amount_pos'] = df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['lg_amount_pos'] = df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['net_mf_amount_pos'] = df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        df_money_all['sm_amount_pos'] = df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos'] = df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos'] = df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount'] = df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount'] = df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount'] = df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left joins keep every bar row even when a source is missing that day.
        df_all = pd.merge(df_data, df_adj_all, how='left', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_money_all, how='left', on=['ts_code', 'trade_date'])
        df_all = pd.merge(df_all, df_long_all, how='left', on=['ts_code', 'trade_date'])
        print(df_all)
        df_all.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)
        # ts_code arrives as int here: convert to text and left-pad to 6 digits.
        df_all['ts_code'] = df_all['ts_code'].astype('str')
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)
        print(df_all)
        # Exclude the STAR board; tag the remaining boards by code prefix.
        df_all[["ts_code"]] = df_all[["ts_code"]].astype(str)
        df_all = df_all[df_all['ts_code'].str.startswith('688') == False]
        df_all['class1'] = 0
        df_all.loc[df_all['ts_code'].str.startswith('30') == True, 'class1'] = 1
        df_all.loc[df_all['ts_code'].str.startswith('60') == True, 'class1'] = 2
        df_all.loc[df_all['ts_code'].str.startswith('00') == True, 'class1'] = 3
        # Adjusted price: today's adj_factor may be absent in live data, so
        # reconstruct from yesterday's adjusted close and today's pct_chg
        # (presumably — rows with adj_factor filled as 0 become 0/NaN and are
        # dropped later; confirm against the data feed).
        df_all['adj_factor'] = df_all['adj_factor'].fillna(0)
        df_all['real_price'] = df_all['close'] * df_all['adj_factor']
        df_all['real_price'] = df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price'] = df_all['real_price'] * (1 + df_all['pct_chg'] / 100)
        df_all['real_price_pos'] = df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0, drop=True)
        # Cross-sectional valuation ranks, shifted one day to avoid lookahead.
        df_all['total_mv_rank'] = df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank'] = df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank'] = df_all['total_mv_rank'] * 19.9 // 1
        df_all['pb_rank'] = df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank'] = df_all.groupby('ts_code')['pb_rank'].shift(1)
        df_all['circ_mv_pct'] = (df_all['total_mv'] - df_all['circ_mv']) / df_all['total_mv']
        df_all['circ_mv_pct'] = df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct'] = df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        df_all['ps_ttm'] = df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm'] = df_all.groupby('ts_code')['ps_ttm'].shift(1)
        # Price-position features at 25/8-day windows.
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 8, 'min')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 25, 'max')
        df_all, _ = FEsingle.CloseWithHighLow(df_all, 8, 'max')
        df_all, _ = FEsingle.HighLowRange(df_all, 8)
        df_all, _ = FEsingle.HighLowRange(df_all, 25)
        # Limit-up flag: >9.4% (10% board) or within (4.8%, 5.2%) (5% band).
        df_all['high_stop'] = 0
        df_all.loc[df_all['pct_chg'] > 9.4, 'high_stop'] = 1
        df_all.loc[(df_all['pct_chg'] < 5.2) & (4.8 < df_all['pct_chg']), 'high_stop'] = 1
        # 1-day cross-sectional change ranks and multi-day momentum features.
        df_all['chg_rank'] = df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        df_all['pct_chg_abs'] = df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank'] = df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all = FEsingle.PctChgSumRank(df_all, 3)
        df_all = FEsingle.PctChgSumRank(df_all, 6)
        df_all = FEsingle.PctChgSumRank(df_all, 12)
        df_all = FEsingle.PctChgSum(df_all, 3)
        df_all = FEsingle.PctChgSum(df_all, 6)
        df_all = FEsingle.PctChgSum(df_all, 12)
        df_all = FEsingle.AmountChgRank(df_all, 12)
        # Express open/high/low as percent moves vs. pre_close, then rank daily.
        dolist = ['open', 'high', 'low']
        df_all['pct_chg_r'] = df_all['pct_chg']
        for curc in dolist:
            buffer = ((df_all[curc] - df_all['pre_close']) * 100) / df_all['pre_close']
            df_all[curc] = buffer
            df_all[curc] = df_all.groupby('trade_date')[curc].rank(pct=True)
        df_all = FEsingle.OldFeaturesRank(df_all, ['open', 'high', 'low', 'pct_chg_r', 'pst_amount_rank_12', 'real_price_pos'], 1)
        # Sample filters: low-priced names and thin turnover.
        df_all = df_all[df_all['close'] > 3]
        df_all = df_all[df_all['amount'] > 15000]
        df_all.drop(['close', 'pre_close', 'pct_chg', 'adj_factor', 'real_price', 'amount', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        # Drop limit-up rows; no ST filter here (no limit-price file is loaded).
        df_all = df_all[df_all['high_stop'] == 0]
        df_all.drop(['high_stop'], axis=1, inplace=True)
        df_all.dropna(axis=0, how='any', inplace=True)
        # Keep only the most recent trade date for inference.
        month_sec = df_all['trade_date'].max()
        df_all = df_all[df_all['trade_date'] == month_sec]
        print(df_all)
        df_all = df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
class FEfast_a41(FEbase):
    """Feature pipeline labelled by a 5-day forward trend; uses no moneyflow
    inputs and restricts the sample to small-cap names.
    """

    def __init__(self):
        pass

    def core(self, DataSetName):
        """Assemble the training feature set.

        Parameters:
            DataSetName: sequence of CSV paths — [0] daily bars,
                [1] adjust factors, [2] daily limit prices,
                [4] daily basics (valuation fields). Index [3] is unused.

        Returns:
            Assembled feature DataFrame, one row per (ts_code, trade_date).
        """
        bars = pd.read_csv(DataSetName[0], index_col=0, header=0)
        adj = pd.read_csv(DataSetName[1], index_col=0, header=0)
        limits = pd.read_csv(DataSetName[2], index_col=0, header=0)
        basics = pd.read_csv(DataSetName[4], index_col=0, header=0)

        # Inner-join every source on (ts_code, trade_date).
        join_keys = ['ts_code', 'trade_date']
        feats = pd.merge(bars, adj, how='inner', on=join_keys)
        feats = pd.merge(feats, limits, how='inner', on=join_keys)
        feats = pd.merge(feats, basics, how='inner', on=join_keys)
        feats.drop(['turnover_rate', 'volume_ratio', 'pe', 'dv_ttm'], axis=1, inplace=True)

        # Flag abnormal names via the down/up limit ratio.
        # NOTE(review): flag==1 covers ratios in (0.58, 0.85) — presumably the
        # standard +/-10% band; the later filter keeps flag==1 rows.
        feats['limit_percent'] = feats['down_limit'] / feats['up_limit']
        feats['st_or_otherwrong'] = 0
        feats.loc[(feats['limit_percent'] < 0.85) & (0.58 < feats['limit_percent']), 'st_or_otherwrong'] = 1
        feats.drop(['up_limit', 'down_limit', 'limit_percent'], axis=1, inplace=True)

        # Calendar feature.
        feats['dayofweek'] = pd.to_datetime(feats['trade_date'], format='%Y%m%d')
        feats['dayofweek'] = feats['dayofweek'].dt.dayofweek

        # Exclude the STAR board (688*); tag remaining boards by code prefix.
        feats = feats[feats['ts_code'].str.startswith('688') == False]
        feats['class1'] = 0
        for prefix, board_tag in (('30', 1), ('60', 2), ('00', 3)):
            feats.loc[feats['ts_code'].str.startswith(prefix) == True, 'class1'] = board_tag

        # Adjusted price plus the 5-day forward trend label.
        feats['real_price'] = feats['close'] * feats['adj_factor']
        feats = FEsingle.PredictDaysTrend(feats, 5)

        # Cross-sectional valuation ranks, lagged one day to avoid lookahead.
        feats['total_mv_rank'] = feats.groupby('trade_date')['total_mv'].rank(pct=True)
        feats['total_mv_rank'] = feats.groupby('ts_code')['total_mv_rank'].shift(1)
        feats['total_mv_rank'] = feats['total_mv_rank'] * 19.9 // 1
        feats['pb_rank'] = feats.groupby('trade_date')['pb'].rank(pct=True)
        feats['pb_rank'] = feats.groupby('ts_code')['pb_rank'].shift(1)
        feats['circ_mv_pct'] = (feats['total_mv'] - feats['circ_mv']) / feats['total_mv']
        feats['circ_mv_pct'] = feats.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        feats['circ_mv_pct'] = feats.groupby('ts_code')['circ_mv_pct'].shift(1)
        feats['ps_ttm'] = feats.groupby('trade_date')['ps_ttm'].rank(pct=True)
        feats['ps_ttm'] = feats.groupby('ts_code')['ps_ttm'].shift(1)

        # 25-day price-position features.
        feats, _ = FEsingle.CloseWithHighLow(feats, 25, 'min')
        feats, _ = FEsingle.CloseWithHighLow(feats, 25, 'max')
        feats, _ = FEsingle.HighLowRange(feats, 25)
        feats.drop(['change', 'vol'], axis=1, inplace=True)

        # Limit-up flag (daily change above 9.4%).
        feats['high_stop'] = 0
        feats.loc[feats['pct_chg'] > 9.4, 'high_stop'] = 1

        # 1-day cross-sectional change ranks and short-horizon momentum.
        feats['chg_rank'] = feats.groupby('trade_date')['pct_chg'].rank(pct=True)
        feats['pct_chg_abs'] = feats['pct_chg'].abs()
        feats['pct_chg_abs_rank'] = feats.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        feats = FEsingle.PctChgAbsSumRank(feats, 6)
        feats = FEsingle.PctChgSumRank(feats, 3)
        feats = FEsingle.PctChgSum(feats, 3)

        # Express open/high/low as percent moves vs. pre_close, rank daily,
        # then add 1-day lagged copies of the selected features.
        feats['pct_chg_r'] = feats['pct_chg']
        for col in ('open', 'high', 'low'):
            pct_vs_preclose = ((feats[col] - feats['pre_close']) * 100) / feats['pre_close']
            feats[col] = pct_vs_preclose
            feats[col] = feats.groupby('trade_date')[col].rank(pct=True)
        feats = FEsingle.OldFeaturesRank(feats, ['open', 'high', 'low', 'pct_chg_r'], 1)

        feats.drop(['pre_close', 'adj_factor', 'total_mv', 'pb', 'circ_mv', 'pct_chg_abs'], axis=1, inplace=True)
        feats.dropna(axis=0, how='any', inplace=True)
        feats.drop(['pct_chg'], axis=1, inplace=True)

        # Sample filters: penny stocks, anything but the smallest caps,
        # thin turnover, limit-up rows, abnormal names.
        feats = feats[feats['close'] > 2]
        feats = feats[feats['total_mv_rank'] < 6]
        feats = feats[feats['amount'] > 15000]
        feats = feats[feats['high_stop'] == 0]
        feats = feats[feats['st_or_otherwrong'] == 1]

        # Columns not used downstream.
        feats.drop(['high_stop', 'amount', 'close', 'real_price'], axis=1, inplace=True)
        feats.drop(['st_or_otherwrong'], axis=1, inplace=True)
        feats.dropna(axis=0, how='any', inplace=True)
        print(feats)
        return feats.reset_index(drop=True)
class FEfast_a41e(FEbase):
#这个版本变为3天预测
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
#df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
#df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
#df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
#df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
#df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
#df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#print(df_money_all)
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='std')
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='mean')
#df_data['pct_chg_DayFeatureToAll_std_1']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_std'].shift(1)
#df_data['pct_chg_DayFeatureToAll_mean_1']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_mean'].shift(1)
#df_data['pct_chg_DayFeatureToAll_std_2']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_std'].shift(2)
#df_data['pct_chg_DayFeatureToAll_mean_2']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_mean'].shift(2)
#将空白的地方填满
df_long_all['pe'].fillna(999,inplace=True)
df_long_all['pb'].fillna(99,inplace=True)
df_long_all['ps_ttm'].fillna(99,inplace=True)
df_long_all['dv_ttm'].fillna(0,inplace=True)
#df_long_all.to_csv('testsee1120.csv')
print(df_long_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
#df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio'],axis=1,inplace=True)
#df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#是否st或其他
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##排除科创版
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#复权后价格
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all['pe_rank']=df_all.groupby('trade_date')['pe'].rank(pct=True)
df_all['pe_rank']=df_all.groupby('ts_code')['pe_rank'].shift(1)
df_all['dv_ttm']=df_all.groupby('trade_date')['dv_ttm'].rank(pct=True)
df_all['dv_ttm']=df_all.groupby('ts_code')['dv_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
#df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
#df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
#df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
#df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
#df_all,_=FEsingle.HighLowRange(df_all,5)
#df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
#df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
#df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
#df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
#df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
#df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#是否停
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###真实价格范围(区分实际股价高低)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1日
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
#df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
#df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
#df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
#df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
#df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#计算三种比例rank
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs','pe'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#删除股价过低的票
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEfast_b02(FEbase):
    # "fast" feature-set variant b02.
    # NOTE(review): the original note here said "3-day prediction", but core()
    # labels targets via FEsingle.PredictDaysReal5day(df_all,5) — confirm the
    # intended prediction horizon.
    def __init__(self):
        pass

    def core(self,DataSetName):
        """Build the feature table for this dataset variant.

        DataSetName: indexable collection of CSV paths —
          [0] daily bars, [1] adjustment factors, [2] daily price limits,
          [3] money flow (unused here), [4] daily basic indicators.
        Returns the merged, filtered feature DataFrame with its index reset.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        #df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        #df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        #df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        #df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        #df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        #df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        #df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #print(df_money_all)
        #df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='std')
        #df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='mean')
        ##df_data.to_csv('testsee1120.csv')
        #print(df_data)
        # inner-join all sources on (ts_code, trade_date)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        #df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        #df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag for ST (special treatment) or otherwise abnormal stocks, based on
        # the down/up limit ratio. NOTE(review): an ordinary +/-10% stock has a
        # ratio of about 0.82, which falls INSIDE (0.58, 0.85) — so despite the
        # name, 1 appears to mark ordinary stocks; confirm the intent.
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        # day-of-week feature from the YYYYMMDD trade date
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## exclude STAR Market (688xxx) listings
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # board classification: 1=ChiNext(30), 2=Shanghai main(60), 3=Shenzhen main(00)
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # adjusted (ex-rights) price
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        # label the prediction target over a 5-step horizon
        df_all=FEsingle.PredictDaysReal5day(df_all,5)
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        #df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # cross-sectional (per trade_date) percentile ranks of valuation/size
        # columns; most are then shifted 1 day within each stock to avoid
        # same-day lookahead (total_mv_rank is NOT shifted here).
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        #df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        #df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # fraction of market cap that is NOT in free float, ranked per day
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # raw (un-ranked) distance-to-extreme and range features over several windows
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow_self(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange_self(df_all,5)
        df_all,_=FEsingle.HighLowRange_self(df_all,12)
        df_all,_=FEsingle.HighLowRange_self(df_all,25)
        #df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        #df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        #df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        #df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        #df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        #df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up flag: pct_chg > 9.4 treated as (near) limit-up
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguishes actual price level)
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
        #df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        #df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # rolling momentum / volatility style features over several windows
        df_all=FEsingle.PctChgAbsSum_self(df_all,6)
        df_all=FEsingle.PctChgSum_self(df_all,3)
        df_all=FEsingle.PctChgSum_self(df_all,6)
        df_all=FEsingle.PctChgSum_self(df_all,12)
        df_all=FEsingle.PctChgSum_self(df_all,24)
        #df_all=FEsingle.PctChgSum(df_all,3)
        #df_all=FEsingle.PctChgSum(df_all,6)
        #df_all=FEsingle.PctChgSum(df_all,12)
        #df_all=FEsingle.PctChgSum(df_all,24)
        #df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        #df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        #df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        #df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        #df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        #df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # convert open/high/low into percent moves vs. the previous close
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            #df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # append 1-day-lagged copies of these features
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        ##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        ##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        # drop raw columns that have been replaced by derived features
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        # NOTE(review): total_mv_rank here is a pct rank in (0, 1], so "<6" is
        # always true — likely leftover from the commented-out *19.9//1 bucketing.
        df_all=df_all[df_all['total_mv_rank']<6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        # require a minimum daily turnover
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # temporarily unused columns / final row filters
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all
class trend_following(FEbase):
    # Moving-average (5/60) crossover backtest plus a live-feature builder.
    def __init__(self):
        pass

    def core(self,DataSetName):
        """Backtest a short/long moving-average crossover rule per stock.

        DataSetName: CSV paths — [0] daily bars, [1] adjustment factors
        (indices 2 and 4 are read in other variants but commented out here).
        Buys when the 5-day mean of the adjusted price crosses above the
        60-day mean, sells on the reverse cross or on a -10% stop-loss, and
        appends each closed trade's return to df_ana (written to
        'save10_2avg.csv'). Prints per-stock buy-and-hold vs. strategy
        multiples. Returns df_all with its index reset.

        NOTE(review): SOURCE indentation was lost; the nesting of the
        buy / stop-loss / sell branches below was reconstructed from the
        control-flow semantics (the `continue` and the trailing close-out
        guard) — verify against the original file.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        #df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        #df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        #df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        #df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        #df_all.drop(['turnover_rate','volume_ratio','dv_ttm'],axis=1,inplace=True)
        # adjusted (ex-rights) price
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        longline=60
        bufferbak='_'+str(longline)
        # rolling mean of the adjusted price (long window), realigned to the
        # original row index via the level_1 column produced by reset_index
        xxx=df_all.groupby('ts_code')['real_price'].rolling(longline).mean().reset_index()
        xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
        xxx.drop(['ts_code'],axis=1,inplace=True)
        df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
        shortline=5
        bufferbak='_'+str(shortline)
        # rolling mean of the adjusted price (short window)
        xxx=df_all.groupby('ts_code')['real_price'].rolling(shortline).mean().reset_index()
        xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
        xxx.drop(['ts_code'],axis=1,inplace=True)
        df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        # collect the unique stock codes
        codelistbuffer=df_all['ts_code']
        codelistbuffer=codelistbuffer.unique()
        codelist=codelistbuffer.tolist()
        # stop-loss threshold: close the position below -10%
        cutlimit=-0.1
        df_ana = pd.DataFrame(columns = ["code", "data", "differ"])
        ct=0
        for curcode in codelist:
            cur_code_df=df_all[df_all['ts_code']==curcode]
            count_flag=0
            startprice=0
            stopprice=0
            changesum=1
            # NOTE(review): hard-coded positional .values offsets — presumably
            # 1=trade_date, 12=real_price, 13=long MA, 14=short MA; fragile,
            # verify against the merged column layout.
            start0_price=cur_code_df.loc[cur_code_df.index[0]].values[12]
            for indexs in cur_code_df.index:
                cur_code_day=cur_code_df.loc[indexs].values[1]
                cur_code_now=cur_code_df.loc[indexs].values[12]
                cur_code_avg_short=cur_code_df.loc[indexs].values[14]
                cur_code_avg_long=cur_code_df.loc[indexs].values[13]
                if(cur_code_avg_short>cur_code_avg_long):
                    if(count_flag==0):
                        # golden cross while flat: open a position at today's price
                        count_flag=1
                        startprice=cur_code_now
                        #print(cur_code_day,end='')
                        #print(' ',end='')
                        ##print(cur_code_day)
                        #print(startprice,end='')
                        #print(' ',end='')
                        ##print(cur_code_day)
                        #print('buy')
                if(count_flag==1):
                    # stop-loss check while holding
                    nowcut=(cur_code_now-startprice)/startprice
                    if(nowcut<cutlimit):
                        count_flag=0
                        differ=nowcut
                        changesum=changesum*(1+differ)
                        #print(cur_code_day,end='')
                        #print(' ',end='')
                        #print(cur_code_now,end='')
                        #print(' ',end='')
                        #print('cutsell')
                        df_ana.loc[df_ana.shape[0]+1] = {'code':curcode,'data':cur_code_day,'differ':differ}
                        # stopped out: skip the death-cross check for this bar
                        continue
                    if(cur_code_avg_short<cur_code_avg_long):
                        # death cross while holding: close the position
                        count_flag=0
                        #stopprice=cur_code_now
                        #print(cur_code_day,end='')
                        #print(' ',end='')
                        #print(cur_code_now,end='')
                        #print(' ',end='')
                        #print('sell')
                        differ=(cur_code_now-startprice)/startprice
                        changesum=changesum*(1+differ)
                        df_ana.loc[df_ana.shape[0]+1] = {'code':curcode,'data':cur_code_day,'differ':differ}
                #print(cur_code_df.loc[indexs].values[0:-1])
            if(count_flag==1):
                # close any still-open position at the last observed bar
                differ=(cur_code_now-startprice)/startprice
                changesum=changesum*(1+differ)
                df_ana.loc[df_ana.shape[0]+1] = {'code':curcode,'data':cur_code_day,'differ':differ}
            #if(ct%100==0):
            #    #df_ana.to_csv('save10_2avg.csv')
            #    print(ct)
            ct+=1
            # buy-and-hold multiple over the same span, for comparison
            stopprice=cur_code_df.loc[cur_code_df.index[-1]].values[12]
            pctnostra=stopprice/start0_price
            print(curcode,end='')
            print(' ',end='')
            print(pctnostra,end='')
            print(' ',end='')
            print(changesum)
            #print(cur_code_df)
            # debugger anchor (no effect)
            dddddddd=1
        df_ana.to_csv('save10_2avg.csv')
        #df_all.to_csv('read5mean.csv')
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build today's live feature row set for the new model.

        Reads 'real_now.csv', 'real_adj_now.csv' and 'real_long_now.csv',
        rebuilds the same feature pipeline as training, keeps only the most
        recent trade_date, and writes the result to 'today_train.csv'.
        Returns None (output is the CSV side effect).
        """
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # questionable step — kept disabled (see duplicate filter below)
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required here
        print(df_all)
        ## exclude STAR Market (688xxx) listings
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # board classification: 1=ChiNext(30), 2=Shanghai main(60), 3=Shenzhen main(00)
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # adjusted (ex-rights) price; today's adj_factor may be missing, so it
        # is rebuilt from yesterday's adjusted price and today's pct_chg
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        # cross-sectional valuation/size ranks, lagged one day per stock
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # ranked distance to rolling extremes over 30/10-day windows
        df_all,_=FEsingle.CloseWithHighLow(df_all,30,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,10,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,30,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,10,'max')
        #===================================================================================================================================#
        # limit-up flag (also flags the ~5% band used for ST stocks)
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day cross-sectional change rank
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,10)
        #print(df_all)
        df_all=FEsingle.AmountChgRank(df_all,10)
        # compute ranks for the three price ratios (open/high/low vs. pre_close)
        dolist=['open','high','low']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        # append 1/2/3-day-lagged copies of these features
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_10'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_10'],2)
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_10'],3)
        # drop stocks with too low a price / market value
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['total_mv_rank']<5]
        df_all=df_all[df_all['amount']>15000]
        df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv'],axis=1,inplace=True)
        # temporarily unused columns / final row filters
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # keep only the most recent trade date (today's inference rows)
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        # debugger anchor (no effect)
        dwdw=1
class FEsingle:
def CloseWithHighLow(df_all,days,minmax='min',intflag=False,standardflag=False,trainflag=False):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_rank_'+minmax
if(minmax=='min'):
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
else:
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all['30_pct']=(100*(df_all['real_price']+0.001-df_all['real_price_30min']))/df_all['real_price_30min']
if(standardflag):
stringdisplay2=stringdisplay+'_Standard'
stringdisplaysave=stringdisplay2+'.pkl'
num_data=df_all[['30_pct']]
if(trainflag):
scaler=StandardScaler()
num_data=scaler.fit_transform(num_data.values)
joblib.dump(scaler,stringdisplaysave)
else:
scaler2=joblib.load(stringdisplaysave)
num_data=scaler2.transform(num_data.values)
df_all[stringdisplay2]=num_data
df_all[stringdisplay]=df_all.groupby('trade_date')['30_pct'].rank(pct=True)
df_all.drop(['30_pct','real_price_30min'],axis=1,inplace=True)
if(intflag):
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def CloseWithHighLow_self(df_all,days,minmax='min',intflag=False,standardflag=False,trainflag=False):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_rank_'+minmax
if(minmax=='min'):
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
else:
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all[stringdisplay]=(100*(df_all['real_price']+0.001-df_all['real_price_30min']))/df_all['real_price_30min']
#df_all[stringdisplay]=df_all.groupby('trade_date')['30_pct'].rank(pct=True)
df_all.drop(['real_price_30min'],axis=1,inplace=True)
if(intflag):
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def HighLowRange(df_all,days,intflag=False):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_Rangerank'
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30max')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all['30_pct']=(100*(df_all['real_price_30max']+0.001-df_all['real_price_30min']))/df_all['real_price_30min']
df_all[stringdisplay]=df_all.groupby('trade_date')['30_pct'].rank(pct=True)
df_all.drop(['30_pct','real_price_30min','real_price_30max'],axis=1,inplace=True)
if(intflag):
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def HighLowRange_self(df_all,days,intflag=False):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_Rangerank'
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30max')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all[stringdisplay]=(100*(df_all['real_price_30max']+0.001-df_all['real_price_30min']))/df_all['real_price_30min']
#df_all[stringdisplay]=df_all.groupby('trade_date')['30_pct'].rank(pct=True)
df_all.drop(['real_price_30min','real_price_30max'],axis=1,inplace=True)
if(intflag):
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def HighLowRangeReal(df_all,days,intflag=False):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_RangerankReal'
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30max')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all['30_pct']=(100*(df_all['real_price_30max']+0.001-df_all['real_price_30min']))/df_all['real_price_30min']
df_all[stringdisplay]=df_all['30_pct']
df_all.drop(['30_pct','real_price_30min','real_price_30max'],axis=1,inplace=True)
if(intflag):
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def CloseWithHighLow_notrank(df_all,days,minmax='min'):
#输入几日和最高或最低返回实际值
stringdisplay=str(days)+'_pct_rank_'+minmax
if(minmax=='min'):
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
else:
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all[stringdisplay]=(10*(df_all['real_price']-df_all['real_price_30min']))/df_all['real_price_30min']
df_all.drop(['real_price_30min'],axis=1,inplace=True)
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def CloseWithHighLow_realrank(df_all,days,minmax='min'):
#输入几日和最高或最低返回排名
#30日最低比值
stringdisplay=str(days)+'_pct_realrank_'+minmax
if(minmax=='min'):
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).min().reset_index()
else:
xxx=df_all.groupby('ts_code')['real_price'].rolling(days).max().reset_index()
xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
xxx.drop(['ts_code'],axis=1,inplace=True)
df_all=df_all.join(xxx, lsuffix='', rsuffix='_30min')
#bbb=df_all.groupby('ts_code')['real_price'].agg({'all_min':np.min, 'all_max': np.max}).reset_index()
#ccc=pd.merge(df_all, bbb, how='inner', on=['ts_code'])
df_all['30_pct']=(100*(df_all['real_price']-df_all['real_price_30min']))/df_all['real_price_30min']
df_all[stringdisplay]=df_all['30_pct'].rank(pct=True)
df_all.drop(['30_pct','real_price_30min'],axis=1,inplace=True)
df_all=FEsingle.changerank_line(df_all,stringdisplay)
return df_all,stringdisplay
def DayFeatureToAll(df_input,name,method='mean'):
#输入需要对哪列进行处理,以及处理的方法
#输出自动添加到对于df后方
stringdisplay=name+'_DayFeatureToAll_'+method
dftest=df_input.groupby(df_input['trade_date'])[name].agg(method)
dftest2=pd.DataFrame({stringdisplay:dftest}).reset_index()
df_input=pd.merge(df_input, dftest2, how='left', on=['trade_date'])
return df_input,stringdisplay
def changerank_line(df_all,b):
df_all[b]=df_all[b]*19.9//2
return df_all
def PredictDays(df_all,days):
##明日幅度
tms=[]
for i in range(days):
curindex=-i-1
curtm=df_all.groupby('ts_code')['pct_chg'].shift(curindex)
tms.append(curtm)
tmpdf=((100+tms[0])/100)
for i in range(days):
if i==0:
continue
tmpdf*=(100+tms[i])/100
df_all['tomorrow_chg']=(tmpdf-1)*100
#df_all['tomorrow_chg']=(((100+tm1)/100)*((100+tm2)/100)*((100+tm3)/100)*((100+tm4)/100)*((100+tm5)/100)-1)*100
#df_all['tomorrow_chg']=((100+tm1)*(100+tm2)*(100+tm3)-1000000)/10000
#df_all['tomorrow_chg']=df_all.groupby('ts_code')['pct_chg'].shift(-1)
#明日排名
df_all['tomorrow_chg_rank']=df_all.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
df_all['tomorrow_chg_rank']=df_all['tomorrow_chg_rank']*9.9//1
return df_all
def PredictDaysStart(df_all,days):
    """Forward pct change of 'real_price' over the next `days` rows per ts_code
    ('tomorrow_chg'), plus its per-trade_date percentile rank floored into
    10 integer buckets ('tomorrow_chg_rank')."""
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_price'].shift(0)
    nextnstart=df_all.groupby('ts_code')['real_price'].shift(0-days)
    df_all['tomorrow_chg']=((nextnstart-nextstart)/nextstart)*100
    ##forward rank
    df_all['tomorrow_chg_rank']=df_all.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
    df_all['tomorrow_chg_rank']=df_all['tomorrow_chg_rank']*9.9//1
    return df_all
def PredictDaysTrend(df_all,days):
    """Forward pct change of 'real_price' over the next `days` rows per ts_code
    ('tomorrow_chg'), plus its per-trade_date percentile rank floored into
    20 integer buckets ('tomorrow_chg_rank')."""
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_price'].shift(0)
    nextnstart=df_all.groupby('ts_code')['real_price'].shift(0-days)
    df_all['tomorrow_chg']=((nextnstart-nextstart)/nextstart)*100
    ##forward rank
    df_all['tomorrow_chg_rank']=df_all.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
    df_all['tomorrow_chg_rank']=df_all['tomorrow_chg_rank']*19.9//1
    return df_all
def PredictDaysReal(df_all,days):
    """Forward pct change of 'real_price' over the next `days` rows per ts_code
    ('tomorrow_chg'), classified into fixed absolute-return buckets 0..9
    ('tomorrow_chg_rank') with thresholds at +/-1.4, 2.8, 4.6 and 8 percent."""
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_price'].shift(0)
    nextnstart=df_all.groupby('ts_code')['real_price'].shift(0-days)
    df_all['tomorrow_chg']=((nextnstart-nextstart)/nextstart)*100
    #bucket classification: successive overwrites leave the highest matching bucket
    df_all['tomorrow_chg_rank']=0
    df_all.loc[df_all['tomorrow_chg']>-8,'tomorrow_chg_rank']=1
    df_all.loc[df_all['tomorrow_chg']>-4.6,'tomorrow_chg_rank']=2
    df_all.loc[df_all['tomorrow_chg']>-2.8,'tomorrow_chg_rank']=3
    df_all.loc[df_all['tomorrow_chg']>-1.4,'tomorrow_chg_rank']=4
    df_all.loc[df_all['tomorrow_chg']>0,'tomorrow_chg_rank']=5
    df_all.loc[df_all['tomorrow_chg']>1.4,'tomorrow_chg_rank']=6
    df_all.loc[df_all['tomorrow_chg']>2.8,'tomorrow_chg_rank']=7
    df_all.loc[df_all['tomorrow_chg']>4.6,'tomorrow_chg_rank']=8
    df_all.loc[df_all['tomorrow_chg']>8,'tomorrow_chg_rank']=9
    return df_all
def PredictDaysReal5day(df_all,days):
    """Forward pct change of 'real_price' over the next `days` rows per ts_code
    ('tomorrow_chg'), classified into 20 fixed buckets ('tomorrow_chg_rank')
    using hard-coded thresholds (presumably empirical 5-day return ventiles --
    TODO confirm against the data these were derived from)."""
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_price'].shift(0)
    nextnstart=df_all.groupby('ts_code')['real_price'].shift(0-days)
    df_all['tomorrow_chg']=((nextnstart-nextstart)/nextstart)*100
    #bucket classification: successive overwrites leave the highest matching bucket
    df_all['tomorrow_chg_rank']=0
    df_all.loc[df_all['tomorrow_chg']>-9.34,'tomorrow_chg_rank']=1
    df_all.loc[df_all['tomorrow_chg']>-5.48,'tomorrow_chg_rank']=2
    df_all.loc[df_all['tomorrow_chg']>-4.2,'tomorrow_chg_rank']=3
    df_all.loc[df_all['tomorrow_chg']>-3.4,'tomorrow_chg_rank']=4
    df_all.loc[df_all['tomorrow_chg']>-2.7,'tomorrow_chg_rank']=5
    df_all.loc[df_all['tomorrow_chg']>-2.3,'tomorrow_chg_rank']=6
    df_all.loc[df_all['tomorrow_chg']>-1.86,'tomorrow_chg_rank']=7
    df_all.loc[df_all['tomorrow_chg']>-1.47,'tomorrow_chg_rank']=8
    df_all.loc[df_all['tomorrow_chg']>-1.09,'tomorrow_chg_rank']=9
    df_all.loc[df_all['tomorrow_chg']>-0.74,'tomorrow_chg_rank']=10
    df_all.loc[df_all['tomorrow_chg']>-0.38,'tomorrow_chg_rank']=11
    df_all.loc[df_all['tomorrow_chg']>0.398,'tomorrow_chg_rank']=12
    df_all.loc[df_all['tomorrow_chg']>0.838,'tomorrow_chg_rank']=13
    df_all.loc[df_all['tomorrow_chg']>1.35,'tomorrow_chg_rank']=14
    df_all.loc[df_all['tomorrow_chg']>1.96,'tomorrow_chg_rank']=15
    df_all.loc[df_all['tomorrow_chg']>2.74,'tomorrow_chg_rank']=16
    df_all.loc[df_all['tomorrow_chg']>3.81,'tomorrow_chg_rank']=17
    df_all.loc[df_all['tomorrow_chg']>5.58,'tomorrow_chg_rank']=18
    df_all.loc[df_all['tomorrow_chg']>10.77,'tomorrow_chg_rank']=19
    return df_all
def PredictDaysStartreal(df_all,days):
    """Open-to-open forward return label: fractional change of 'real_open' from
    the next row to 5 rows ahead (per ts_code), bucketed by flooring value*10.

    NOTE(review): the `days` parameter is ignored -- both shifts are hard-coded
    (-1 and -5); confirm whether shift(-days) was intended.
    """
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_open'].shift(-1)
    nextnstart=df_all.groupby('ts_code')['real_open'].shift(-5)
    df_all['tomorrow_chg']=(nextnstart-nextstart)/nextstart
    ##forward rank (raw fraction floored into integer buckets)
    df_all['tomorrow_chg_rank']=df_all['tomorrow_chg']*10//1
    return df_all
def random_mix(df_all):
    """Sanity-check helper: adds a random 'rdtest' column with values in
    {0, 1, 2}, prints the frame for inspection, then builds a 5-day forward
    open-to-open return label ('tomorrow_chg') and its per-trade_date
    percentile rank bucketed into integers 0..9 ('tomorrow_chg_rank').

    Fix: removed the unused local `data2` (a DataFrame built from the random
    draws that was never read).
    """
    length=df_all.shape[0]
    # uniform draws from {-1, 0, 1}; rdtest = 1 + draw
    data1=np.random.randint(-1,2,size=length)
    df_all['rdtest']=1
    df_all['rdtest']=df_all['rdtest']+data1
    print(df_all)  # debug dump, intentionally kept
    ##forward change magnitude
    nextstart=df_all.groupby('ts_code')['real_open'].shift(-1)
    nextnstart=df_all.groupby('ts_code')['real_open'].shift(-5)
    df_all['tomorrow_chg']=(nextnstart-nextstart)/nextstart
    ##forward rank
    df_all['tomorrow_chg_rank']=df_all.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
    df_all['tomorrow_chg_rank']=df_all['tomorrow_chg_rank']*9.9//1
    return df_all
def PredictDays_reg(df_all,days):
    """Regression-target variant of PredictDays: compound the next `days`
    pct_chg values per ts_code into 'tomorrow_chg' and keep the raw
    per-trade_date percentile rank in 'tomorrow_chg_rank' (no bucketing)."""
    by_code = df_all.groupby('ts_code')['pct_chg']
    future = [by_code.shift(-(k + 1)) for k in range(days)]
    growth = (100 + future[0]) / 100
    for extra in future[1:]:
        growth = growth * ((100 + extra) / 100)
    df_all['tomorrow_chg'] = (growth - 1) * 100
    # raw percentile rank -- downstream regressors consume it directly
    df_all['tomorrow_chg_rank'] = df_all.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
    return df_all
def PredictDays_notrank(df_all,days):
    """Compound the next `days` pct_chg values per ts_code into 'tomorrow_chg',
    then classify it into fixed absolute buckets 0..9 (thresholds at
    +/-1, 3, 6, 10 percent) instead of a cross-sectional rank."""
    ##forward change magnitude
    tms=[]
    for i in range(days):
        curindex=-i-1
        curtm=df_all.groupby('ts_code')['pct_chg'].shift(curindex)
        tms.append(curtm)
    tmpdf=((100+tms[0])/100)
    for i in range(days):
        if i==0:
            continue
        tmpdf*=(100+tms[i])/100
    df_all['tomorrow_chg']=(tmpdf-1)*100
    #bucket classification: successive overwrites leave the highest matching bucket
    df_all['tomorrow_chg_rank']=0
    df_all.loc[df_all['tomorrow_chg']>-10,'tomorrow_chg_rank']=1
    df_all.loc[df_all['tomorrow_chg']>-6,'tomorrow_chg_rank']=2
    df_all.loc[df_all['tomorrow_chg']>-3,'tomorrow_chg_rank']=3
    df_all.loc[df_all['tomorrow_chg']>-1,'tomorrow_chg_rank']=4
    df_all.loc[df_all['tomorrow_chg']>0,'tomorrow_chg_rank']=5
    df_all.loc[df_all['tomorrow_chg']>1,'tomorrow_chg_rank']=6
    df_all.loc[df_all['tomorrow_chg']>3,'tomorrow_chg_rank']=7
    df_all.loc[df_all['tomorrow_chg']>6,'tomorrow_chg_rank']=8
    df_all.loc[df_all['tomorrow_chg']>10,'tomorrow_chg_rank']=9
    return df_all
def PctChgSumRank(df_all,days,intflag=False):
    """Rolling `days`-day sum of 'chg_rank' per ts_code, replaced by its
    percentile rank across each trade_date; stored as 'chg_rank_<days>'.
    With intflag=True the rank is additionally bucketed to integers."""
    bufferbak='_'+str(days)
    stringdisplay='chg_rank_'+str(days)
    # reset_index exposes the original row index as 'level_1' so the rolling
    # result can be joined back onto the source rows
    xxx=df_all.groupby('ts_code')['chg_rank'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    # column-name clash + rsuffix produces the 'chg_rank_<days>' column
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    df_all[stringdisplay]=df_all.groupby('trade_date')[stringdisplay].rank(pct=True)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def PctChgSum(df_all,days,intflag=False):
    """Append 'pct_chg_<days>': the per-ts_code rolling `days`-day sum of
    pct_chg (raw sum, no cross-sectional ranking). With intflag=True the
    column is bucketed via FEsingle.changerank_line."""
    suffix = '_' + str(days)
    out_col = 'pct_chg' + suffix
    # rolling sum within each code; 'level_1' carries the original row index
    rolled = df_all.groupby('ts_code')['pct_chg'].rolling(days).sum().reset_index()
    rolled = rolled.set_index('level_1').drop(columns=['ts_code'])
    # join: the clashing 'pct_chg' column from `rolled` lands as 'pct_chg_<days>'
    df_all = df_all.join(rolled, lsuffix='', rsuffix=suffix)
    if intflag:
        df_all = FEsingle.changerank_line(df_all, out_col)
    return df_all
def PctChgAbsSumRank(df_all,days,intflag=False):
    """Rolling `days`-day sum of 'pct_chg_abs' per ts_code, replaced by its
    percentile rank across each trade_date; stored as 'pct_chg_abs_<days>'.
    With intflag=True the rank is additionally bucketed to integers."""
    bufferbak='_'+str(days)
    stringdisplay='pct_chg_abs_'+str(days)
    xxx=df_all.groupby('ts_code')['pct_chg_abs'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    # column-name clash + rsuffix produces the 'pct_chg_abs_<days>' column
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    df_all[stringdisplay]=df_all.groupby('trade_date')[stringdisplay].rank(pct=True)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def PctChgAbsSum_self(df_all,days,intflag=False):
    """Like PctChgAbsSumRank but WITHOUT the cross-sectional ranking step:
    'pct_chg_abs_<days>' keeps the raw rolling `days`-day sum of
    'pct_chg_abs' per ts_code. intflag=True still applies bucketing."""
    bufferbak='_'+str(days)
    stringdisplay='pct_chg_abs_'+str(days)
    xxx=df_all.groupby('ts_code')['pct_chg_abs'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def PctChgSum_self(df_all,days,intflag=False):
    """Raw rolling `days`-day sum of 'pct_chg' per ts_code (no ranking).

    NOTE(review): the output column label is 'chg_rank_<days>' even though the
    value summed is raw 'pct_chg', not 'chg_rank' -- this collides with the
    column produced by PctChgSumRank; confirm the intended name.
    """
    bufferbak='_'+str(days)
    stringdisplay='chg_rank_'+str(days)
    xxx=df_all.groupby('ts_code')['pct_chg'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def InputChgSum(df_all,days,sumlinename,intflag=False):
    """Generic rolling-sum feature: '<sumlinename>_<days>' holds the
    per-ts_code rolling `days`-day sum of column `sumlinename` (raw sum,
    no ranking). intflag=True applies integer bucketing."""
    bufferbak='_'+str(days)
    stringdisplay=sumlinename+'_'+str(days)
    xxx=df_all.groupby('ts_code')[sumlinename].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def PctChgSumRank_notrank(df_all,days):
    """'chg_rank_<days>': raw rolling `days`-day sum of 'chg_rank' per ts_code,
    joined back without the cross-sectional ranking step of PctChgSumRank."""
    bufferbak='_'+str(days)
    stringdisplay='chg_rank_'+str(days)
    xxx=df_all.groupby('ts_code')['chg_rank'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    return df_all
def PctChgSumRank_Common(df_all,days,namechg='high'):
    """Rolling `days`-day sum of column `namechg` per ts_code, replaced by its
    per-trade_date percentile rank. Output column is '<namechg><days>'
    (note: no underscore separator, unlike the other *_SumRank helpers)."""
    bufferbak='_'+str(days)
    stringdisplay=namechg+str(days)
    #rolling window of `days` rows
    xxx=df_all.groupby('ts_code')[namechg].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    df_all[stringdisplay]=df_all.groupby('trade_date')[stringdisplay].rank(pct=True)
    return df_all
def AmountChgRank(df_all,days,intflag=False):
    """Volume-surge feature: today's 'amount' divided by its `days`-day rolling
    mean (per ts_code) is stored as 'pst_amount', and its per-trade_date
    percentile rank as 'pst_amount_rank_<days>'. intflag=True buckets the rank.
    The intermediate 'pst_amount' column is kept on the frame."""
    bufferbak='_'+str(days)
    amountstring='amount'+bufferbak
    stringdisplay='pst_amount_rank_'+str(days)
    #rolling mean volume per ts_code
    xxx=df_all.groupby('ts_code')['amount'].rolling(days).mean().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #today's volume as share of the rolling mean
    df_all['pst_amount']=df_all['amount']/df_all[amountstring]
    df_all.drop([amountstring],axis=1,inplace=True)
    #cross-sectional rank of that share
    df_all[stringdisplay]=df_all.groupby('trade_date')['pst_amount'].rank(pct=True)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    return df_all
def MoneyflowChgRank(df_all,days,intflag=False):
    """NOTE(review): dead code -- this definition is shadowed by a later
    MoneyflowChgRank(df_all, days, shiftday=1) further down the module.
    Despite the name it operates on 'amount' (a near-duplicate of
    AmountChgRank) and, unlike AmountChgRank, drops the intermediate
    'pst_amount' column before returning."""
    bufferbak='_'+str(days)
    amountstring='amount'+bufferbak
    stringdisplay='pst_amount_rank_'+str(days)
    #rolling mean volume per ts_code
    xxx=df_all.groupby('ts_code')['amount'].rolling(days).mean().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #today's volume as share of the rolling mean
    df_all['pst_amount']=df_all['amount']/df_all[amountstring]
    df_all.drop([amountstring],axis=1,inplace=True)
    #cross-sectional rank of that share
    df_all[stringdisplay]=df_all.groupby('trade_date')['pst_amount'].rank(pct=True)
    if(intflag):
        df_all=FEsingle.changerank_line(df_all,stringdisplay)
    df_all.drop(['pst_amount'],axis=1,inplace=True)
    return df_all
def AmountChg_notrank(df_all,days):
    """'pst_amount_notrank_<days>': today's 'amount' divided by its `days`-day
    rolling mean per ts_code (raw ratio, no cross-sectional ranking)."""
    bufferbak='_'+str(days)
    amountstring='amount'+bufferbak
    stringdisplay='pst_amount_notrank_'+str(days)
    #rolling mean volume per ts_code
    xxx=df_all.groupby('ts_code')['amount'].rolling(days).mean().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #today's volume as share of the rolling mean
    df_all[stringdisplay]=df_all['amount']/df_all[amountstring]
    df_all.drop([amountstring],axis=1,inplace=True)
    return df_all
def MoneyflowChgRank(df_all,days,shiftday=1):
    """'pst_moneyflow_rank_<days>': per-trade_date percentile rank of today's
    'moneyflow' over its `days`-day rolling mean (per ts_code), lagged by
    `shiftday` rows. (Shadows the earlier amount-based definition.)"""
    bufferbak='_'+str(days)
    moneyflowstring='moneyflow'+bufferbak
    stringdisplay='pst_moneyflow_rank_'+str(days)
    #rolling mean moneyflow per ts_code
    xxx=df_all.groupby('ts_code')['moneyflow'].rolling(days).mean().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #today's moneyflow as share of the rolling mean
    df_all['pst_moneyflow']=df_all['moneyflow']/df_all[moneyflowstring]
    df_all.drop([moneyflowstring],axis=1,inplace=True)
    #cross-sectional rank of that share
    df_all[stringdisplay]=df_all.groupby('trade_date')['pst_moneyflow'].rank(pct=True)
    #same-day moneyflow is not available at prediction time, so lag the feature
    df_all[stringdisplay]=df_all.groupby('ts_code')[stringdisplay].shift(shiftday)
    df_all.drop(['pst_moneyflow'],axis=1,inplace=True)
    return df_all
def MoneyflowallChgRank(df_all,days,shiftday=1):
    """'pst_net_mf_amount_rank_<days>': per-trade_date percentile rank of
    today's 'net_mf_amount' over its `days`-day rolling mean (per ts_code),
    lagged by `shiftday` rows."""
    bufferbak='_'+str(days)
    net_mf_amountstring='net_mf_amount'+bufferbak
    stringdisplay='pst_net_mf_amount_rank_'+str(days)
    #rolling mean net moneyflow per ts_code
    xxx=df_all.groupby('ts_code')['net_mf_amount'].rolling(days).mean().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #today's net moneyflow as share of the rolling mean
    df_all['pst_net_mf_amount']=df_all['net_mf_amount']/df_all[net_mf_amountstring]
    df_all.drop([net_mf_amountstring],axis=1,inplace=True)
    #cross-sectional rank of that share
    df_all[stringdisplay]=df_all.groupby('trade_date')['pst_net_mf_amount'].rank(pct=True)
    #same-day data is not available at prediction time, so lag the feature
    df_all[stringdisplay]=df_all.groupby('ts_code')[stringdisplay].shift(shiftday)
    df_all.drop(['pst_net_mf_amount'],axis=1,inplace=True)
    return df_all
def MoneyflowsumChgRank(df_all,days,shiftday=1):
    """'pst_net_mf_amount_sum_rank_<days>': per-trade_date percentile rank of
    the `days`-day rolling SUM of 'net_mf_amount' (per ts_code), lagged by
    `shiftday` rows; the intermediate sum column is dropped."""
    bufferbak='_'+str(days)
    net_mf_amountstring='net_mf_amount'+bufferbak
    stringdisplay='pst_net_mf_amount_sum_rank_'+str(days)
    #rolling total net moneyflow per ts_code
    xxx=df_all.groupby('ts_code')['net_mf_amount'].rolling(days).sum().reset_index()
    xxx.set_index(['level_1'], drop=True, append=False, inplace=True, verify_integrity=False)
    xxx.drop(['ts_code'],axis=1,inplace=True)
    df_all=df_all.join(xxx, lsuffix='', rsuffix=bufferbak)
    #cross-sectional rank of the rolling total
    df_all[stringdisplay]=df_all.groupby('trade_date')[net_mf_amountstring].rank(pct=True)
    #same-day data is not available at prediction time, so lag the feature
    df_all[stringdisplay]=df_all.groupby('ts_code')[stringdisplay].shift(shiftday)
    df_all.drop([net_mf_amountstring],axis=1,inplace=True)
    return df_all
def OldFeaturesRank(df_all,features,daybak):
    """For every column name in `features`, add a 'yesterday_<daybak><feature>'
    column holding that feature lagged by `daybak` rows within each ts_code."""
    prefix = 'yesterday_' + str(daybak)
    for feat in features:
        df_all[prefix + feat] = df_all.groupby('ts_code')[feat].shift(daybak)
    return df_all
#Helper functions intended for use as rolling-apply lambdas
def rollingRankArgSort(array):
    """Descending ordinal positions (size minus ascending argsort-rank) for all
    elements of `array` except the last one.

    NOTE(review): the companion helpers return a scalar for one endpoint; the
    `[:-1]` slice here returns an array -- confirm that is intended.
    """
    ascending_ranks = array.argsort().argsort()
    return array.size - ascending_ranks[:-1]
def rollingRankArgSortBack(array):
    """Ascending ordinal rank (0-based) of the LAST element of `array`."""
    ascending_ranks = array.argsort().argsort()
    return ascending_ranks[-1]
def rollingRankSciPy(array):
    """Descending ordinal rank of the FIRST element of `array`: its size minus
    the element's 1-based ascending rank (ties broken by order)."""
    first_ascending = sc.stats.rankdata(array, method = 'ordinal')[0]
    return array.size - first_ascending
def rollingRankSciPyB(array):
    """1-based ascending ordinal rank of the LAST element of `array`."""
    ascending = sc.stats.rankdata(array, method = 'ordinal')
    return ascending[-1]
| 46.241253
| 166
| 0.628245
| 43,435
| 277,540
| 3.625348
| 0.010222
| 0.175275
| 0.056012
| 0.041596
| 0.974395
| 0.971785
| 0.96706
| 0.9617
| 0.953508
| 0.949532
| 0
| 0.027832
| 0.156205
| 277,540
| 6,001
| 167
| 46.248959
| 0.644566
| 0.266859
| 0
| 0.892845
| 0
| 0
| 0.215229
| 0.002695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029609
| false
| 0.005992
| 0.003172
| 0.002115
| 0.058865
| 0.026084
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b91de7987d28a4dcfac7dc142396db56a21183b5
| 112
|
py
|
Python
|
tests/test_app.py
|
ab7289-tandon-nyu/csgy6083_PDS_Project
|
d2b7d22274dcabbb6ae35c17a8ffd06498f3634f
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
ab7289-tandon-nyu/csgy6083_PDS_Project
|
d2b7d22274dcabbb6ae35c17a8ffd06498f3634f
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
ab7289-tandon-nyu/csgy6083_PDS_Project
|
d2b7d22274dcabbb6ae35c17a8ffd06498f3634f
|
[
"MIT"
] | null | null | null |
from app.app import create_app
def test_create_app():
    """Smoke test: the app factory returns a truthy app when given a
    DEBUG/TESTING config mapping."""
    assert create_app({"DEBUG": True, "TESTING": True})
| 18.666667
| 55
| 0.714286
| 17
| 112
| 4.470588
| 0.588235
| 0.355263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151786
| 112
| 5
| 56
| 22.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b951d5ffbef6eb12f22c3ded9e7faa78ff14a929
| 3,958
|
py
|
Python
|
models/initializations.py
|
JannerM/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | 54
|
2017-07-14T01:08:57.000Z
|
2021-07-09T12:46:57.000Z
|
models/initializations.py
|
jannerm/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | null | null | null |
models/initializations.py
|
jannerm/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | 16
|
2017-07-16T03:18:19.000Z
|
2021-05-28T13:04:12.000Z
|
import models
'''
norbf (full), nobases (no gradient), nonsep
uvfa-pos, uvfa-text, cnn+lstm
(nocnn)
rbf + gradient + cnn: full
no rbf: norbf
no gradient: noglobal
no rbf / gradient: nobases
no cnn: nocnn
'''
def init(args, layout_vocab_size, object_vocab_size, text_vocab_size):
    """Dispatch on args.model to construct the requested model variant.
    Returns the model (unbound `model` -> NameError for unknown args.model)."""
    if args.model == 'full': ## new
        model = init_full(args, layout_vocab_size, object_vocab_size, text_vocab_size)
    elif args.model == 'no-gradient':
        model = init_nogradient(args, layout_vocab_size, object_vocab_size, text_vocab_size)
    elif args.model == 'cnn-lstm':
        model = init_cnn_lstm(args, layout_vocab_size, object_vocab_size, text_vocab_size)
    elif args.model == 'uvfa-text':
        model = init_uvfa_text(args, layout_vocab_size, object_vocab_size, text_vocab_size)
    # TODO: clean up UVFA-pos goal loading
    elif args.model == 'uvfa-pos':
        model = init_uvfa_pos(args, layout_vocab_size, object_vocab_size, text_vocab_size)
        # NOTE(review): train_goals / val_goals are not defined in this scope,
        # so this branch raises NameError; the assignments are also unused here.
        train_indices = train_goals
        val_indices = val_goals
    return model
def init_full(args, layout_vocab_size, object_vocab_size, text_vocab_size):
    """Build the full model (attention heatmap with 3 global gradient coeffs).
    Mutates `args` to size the LSTM output for the attention kernel."""
    args.global_coeffs = 3
    args.attention_in_dim = args.obj_embed
    # LSTM output packs the attention kernel weights plus the global coeffs
    args.lstm_out = args.attention_in_dim * args.attention_out_dim * args.attention_kernel**2 + args.global_coeffs
    state_model = models.LookupModel(layout_vocab_size, args.state_embed).cuda()
    # NOTE(review): object_model is not moved to CUDA here, unlike the other
    # submodels -- confirm whether that is intentional.
    object_model = models.LookupModel(object_vocab_size, args.obj_embed)
    text_model = models.TextModel(text_vocab_size, args.lstm_inp, args.lstm_hid, args.lstm_layers, args.lstm_out)
    heatmap_model = models.AttentionGlobal(text_model, args, map_dim=args.map_dim).cuda()
    model = models.MultiNoRBF(state_model, object_model, heatmap_model, args, map_dim=args.map_dim).cuda()
    return model
def init_nogradient(args, layout_vocab_size, object_vocab_size, text_vocab_size):
    """Build the no-gradient ablation: attention heatmap without global
    gradient coefficients (global_coeffs = 0). Mutates `args`."""
    args.global_coeffs = 0
    args.attention_in_dim = args.obj_embed
    args.lstm_out = args.attention_in_dim * args.attention_out_dim * args.attention_kernel**2
    state_model = models.LookupModel(layout_vocab_size, args.state_embed).cuda()
    # NOTE(review): object_model is not moved to CUDA, unlike the other
    # submodels -- confirm whether that is intentional.
    object_model = models.LookupModel(object_vocab_size, args.obj_embed)
    text_model = models.TextModel(text_vocab_size, args.lstm_inp, args.lstm_hid, args.lstm_layers, args.lstm_out)
    heatmap_model = models.AttentionHeatmap(text_model, args, map_dim=args.map_dim).cuda()
    model = models.MultiNoBases(state_model, object_model, heatmap_model, args, map_dim=args.map_dim).cuda()
    return model
def init_cnn_lstm(args, layout_vocab_size, object_vocab_size, text_vocab_size):
    """Build the CNN+LSTM baseline. Mutates `args`: fixes lstm_out at 16 and
    sets cnn_out_dim to twice that. Returns the CUDA-resident model."""
    args.lstm_out = 16
    args.cnn_out_dim = args.lstm_out * 2
    layout_embed = models.LookupModel(layout_vocab_size, args.state_embed)
    obj_embed = models.LookupModel(object_vocab_size, args.obj_embed)
    text_encoder = models.TextModel(
        text_vocab_size, args.lstm_inp, args.lstm_hid, args.lstm_layers, args.lstm_out)
    return models.CNN_LSTM(layout_embed, obj_embed, text_encoder, args).cuda()
def init_uvfa_text(args, layout_vocab_size, object_vocab_size, text_vocab_size, rank = 7):
    # Builds the UVFA-text variant (Python 2 module: print statements).
    # Mutates args.rank / args.lstm_out so the LSTM output matches the
    # low-rank factorization size.
    print '<Models> Using UVFA variant, consider using a lower learning rate (eg, 0.0001)'
    print '<Models> UVFA rank: {} '.format(rank)
    args.rank = rank
    args.lstm_out = rank
    text_model = models.TextModel(text_vocab_size, args.lstm_inp, args.lstm_hid, args.lstm_layers, args.lstm_out)
    model = models.UVFA_text(text_model, layout_vocab_size, object_vocab_size, args, map_dim=args.map_dim).cuda()
    return model
def init_uvfa_pos(args, layout_vocab_size, object_vocab_size, text_vocab_size, rank = 7):
    # Builds the UVFA-pos variant (Python 2 module: print statements).
    # text_vocab_size is accepted for signature parity but unused here.
    print '<Models> Using UVFA variant, consider using a lower learning rate (eg, 0.0001)'
    print '<Models> UVFA rank: {} '.format(rank)
    args.rank = rank
    args.lstm_out = rank
    model = models.UVFA_pos(layout_vocab_size, object_vocab_size, args, map_dim=args.map_dim).cuda()
    return model
| 39.188119
| 114
| 0.748863
| 599
| 3,958
| 4.614357
| 0.128548
| 0.153039
| 0.086831
| 0.09877
| 0.818741
| 0.817294
| 0.810781
| 0.810781
| 0.810781
| 0.810781
| 0
| 0.00566
| 0.151844
| 3,958
| 100
| 115
| 39.58
| 0.817694
| 0.010106
| 0
| 0.413793
| 0
| 0
| 0.065141
| 0
| 0
| 0
| 0
| 0.01
| 0
| 0
| null | null | 0
| 0.017241
| null | null | 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b99c8c0f9c25334a821b77cf7b8d642cd23edd4f
| 2,227
|
py
|
Python
|
mak/libs/pyxx/cxx/grammar/expression/primary/fold.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/pyxx/cxx/grammar/expression/primary/fold.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/pyxx/cxx/grammar/expression/primary/fold.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
fold-expression:
( cast-expression fold-operator ... )
( ... fold-operator cast-expression )
( cast-expression fold-operator ... fold-operator cast-expression )
fold-operator: one of
+ - * / % ^ & | << >>
+= -= *= /= %= ^= &= |= <<= >>= =
== != < > <= >= && || , .* ->*
"""
import glrp
from ....parser import cxx17
from motor_typing import TYPE_CHECKING
@glrp.rule('fold-expression : "(" cast-expression fold-reduction fold-operator "..." ")"')
@glrp.rule('fold-expression : "(" "..." fold-operator cast-expression ")"')
@glrp.rule('fold-expression : "(" cast-expression fold-reduction fold-operator "..." fold-operator cast-expression ")"')
@cxx17
def fold_expression_cxx17(self, p):
    # type: (CxxParser, glrp.Production) -> None
    """C++17 fold-expression productions (unary left/right and binary folds);
    the semantic action is intentionally empty."""
    pass
@glrp.rule('fold-reduction : [split:fold_expression]')
@cxx17
def fold_reduction_cxx17(self, p):
    # type: (CxxParser, glrp.Production) -> None
    """Empty marker production carrying the [split:fold_expression] directive
    used to disambiguate fold-expressions during GLR parsing."""
    pass
@glrp.rule('fold-operator : "+"')
@glrp.rule('fold-operator : "-"')
@glrp.rule('fold-operator : "*"')
@glrp.rule('fold-operator : "/"')
@glrp.rule('fold-operator : "%"')
@glrp.rule('fold-operator : "^"')
@glrp.rule('fold-operator : "&"')
@glrp.rule('fold-operator : "|"')
@glrp.rule('fold-operator : "<<"')
@glrp.rule('fold-operator : ">>"')
@glrp.rule('fold-operator : "+="')
@glrp.rule('fold-operator : "-="')
@glrp.rule('fold-operator : "*="')
@glrp.rule('fold-operator : "/="')
@glrp.rule('fold-operator : "%="')
@glrp.rule('fold-operator : "^="')
@glrp.rule('fold-operator : "&="')
@glrp.rule('fold-operator : "|="')
@glrp.rule('fold-operator : "<<="')
@glrp.rule('fold-operator : ">>="')
@glrp.rule('fold-operator : "="')
@glrp.rule('fold-operator : "=="')
@glrp.rule('fold-operator : "!="')
@glrp.rule('fold-operator : "<"')
@glrp.rule('fold-operator : ">"')
@glrp.rule('fold-operator : "<="')
@glrp.rule('fold-operator : ">="')
@glrp.rule('fold-operator : "&&"')
@glrp.rule('fold-operator : "||"')
@glrp.rule('fold-operator : ","')
@glrp.rule('fold-operator : ".*"')
@glrp.rule('fold-operator : "->*"')
@cxx17
def fold_operator_cxx17(self, p):
    # type: (CxxParser, glrp.Production) -> None
    """One production per operator admissible in a C++17 fold-expression
    (the 32 operators listed in the module docstring); no semantic action."""
    pass
if TYPE_CHECKING:
from ....parser import CxxParser
| 30.094595
| 120
| 0.593624
| 247
| 2,227
| 5.311741
| 0.105263
| 0.384146
| 0.329268
| 0.487805
| 0.811738
| 0.799543
| 0.779726
| 0.779726
| 0.779726
| 0.658537
| 0
| 0.007326
| 0.141895
| 2,227
| 73
| 121
| 30.506849
| 0.679226
| 0.202066
| 0
| 0.12
| 0
| 0.02
| 0.517261
| 0.013016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.06
| 0.08
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
b99d629a7b3fc55dde94ec910192cd3bcbbd343e
| 7,165
|
py
|
Python
|
switchmng/routes/put.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
switchmng/routes/put.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
switchmng/routes/put.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
from flask import current_app
from flask import request
from switchmng.typing import FlaskResponse
from switchmng import database
from .blueprint import restbp
from .errors import *
@restbp.route('/switch_models/<string:resource_id>', methods = ['PUT'])
def put_switch_model(resource_id: str) -> FlaskResponse:
    """PUT handler: create or replace the switch model named by `resource_id`
    from the request's JSON body."""
    db_session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()

    # Reject anything that is not JSON in and JSON out.
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')

    # The body must parse to a JSON object (dict).
    try:
        body = request.json
        if not isinstance(body, dict):
            raise BaseException()
    except:
        return error_400(message = 'Request is not a valid json object')

    # Persist; database errors surface as 400 with the error text.
    try:
        model = database.set_switch_model(db_session, resource_id = resource_id, **body)
    except BaseException as e:
        return error_400(message = str(e))

    return { 'status': 200, 'data': model.jsonify() }, 200
@restbp.route('/switch_models/<string:switch_model_resource_id>/ports/<string:port_model_resource_id>', methods = ['PUT'])
def put_port_model(
        switch_model_resource_id: str,
        port_model_resource_id: str) -> FlaskResponse:
    """PUT handler: create or replace a port model of a switch model from the
    request's JSON body. 415/406 on content-type mismatch, 400 on a non-object
    body or database error."""
    session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()
    # Check request: JSON in, JSON out
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')
    try:
        req = request.json
        if not isinstance(req, dict):
            raise BaseException()
    except:
        return error_400(message = 'Request is not a valid json object')
    # Set in database; body keys are passed through as keyword arguments
    try:
        pm = database.set_port_model(
            session,
            switch_model_resource_id = switch_model_resource_id,
            port_model_resource_id = port_model_resource_id,
            **req)
    except BaseException as e:
        return error_400(message = str(e))
    return { 'status': 200,
             'data': pm.jsonify() }, 200
@restbp.route('/switches/<string:resource_id>', methods = ['PUT'])
def put_switch(resource_id: str) -> FlaskResponse:
    """PUT handler: create or replace the switch named by `resource_id`
    from the request's JSON body."""
    db_session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()

    # Reject anything that is not JSON in and JSON out.
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')

    # The body must parse to a JSON object (dict).
    try:
        body = request.json
        if not isinstance(body, dict):
            raise BaseException()
    except:
        return error_400(message = 'Request is not a valid json object')

    # Persist; database errors surface as 400 with the error text.
    try:
        switch_obj = database.set_switch(db_session, resource_id = resource_id, **body)
    except BaseException as e:
        return error_400(message = str(e))

    return { 'status': 200, 'data': switch_obj.jsonify() }, 200
@restbp.route('/switches/<string:switch_resource_id>/ports/<string:port_resource_id>', methods = ['PUT'])
def put_port(switch_resource_id: str, port_resource_id: str) -> FlaskResponse:
    """PUT handler: create or replace a port of a switch from the request's
    JSON body. 415/406 on content-type mismatch, 400 on a non-object body or
    database error."""
    session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()
    # Check request: JSON in, JSON out
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')
    try:
        req = request.json
        if not isinstance(req, dict):
            raise BaseException()
    except:
        return error_400(message = 'Request is not a valid json object')
    # Set in database; body keys are passed through as keyword arguments
    try:
        pt = database.set_port(
            session,
            switch_resource_id = switch_resource_id,
            port_resource_id = port_resource_id,
            **req)
    except BaseException as e:
        return error_400(message = str(e))
    return { 'status': 200,
             'data': pt.jsonify() }, 200
@restbp.route('/network_protocols/<string:resource_id>', methods = ['PUT'])
def put_network_protocols(resource_id: str) -> FlaskResponse:
    """PUT handler: create or replace the network protocol named by
    `resource_id` from the request's JSON body. 415/406 on content-type
    mismatch, 400 on a non-object body or database error."""
    session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()
    # Check request: JSON in, JSON out
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')
    try:
        req = request.json
        if not isinstance(req, dict):
            raise BaseException()
    except:
        return error_400(message = 'Request is not a valid json object')
    # Set in database; body keys are passed through as keyword arguments
    try:
        np = database.set_network_protocol(session, resource_id = resource_id, **req)
    except BaseException as e:
        return error_400(message = str(e))
    return { 'status': 200,
             'data': np.jsonify() }, 200
@restbp.route('/connectors/<string:resource_id>', methods = ['PUT'])
def put_connector(resource_id: str) -> FlaskResponse:
    """Create or replace a connector from the JSON request body.

    Returns 415 if the request is not JSON, 406 if the client will not
    accept JSON, 400 on a malformed body or database error, and 200 with
    the persisted connector on success.
    """
    session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()

    # Check request
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')
    try:
        req = request.json
        if not isinstance(req, dict):
            # Valid JSON but not an object (e.g. a list or a scalar).
            raise ValueError()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; JSON parse errors still map to a 400.
        return error_400(message = 'Request is not a valid json object')

    # Set in database
    try:
        cn = database.set_connector(session, resource_id = resource_id, **req)
    except Exception as e:
        # `Exception` instead of `BaseException`: process-control
        # exceptions must not be converted into HTTP 400 responses.
        return error_400(message = str(e))

    return { 'status': 200,
             'data': cn.jsonify() }, 200
@restbp.route('/vlans/<int:resource_id>', methods = ['PUT'])
def put_vlan(resource_id: int) -> FlaskResponse:
    """Create or replace a vlan from the JSON request body.

    ``resource_id`` is annotated ``int`` to match the ``<int:...>``
    route converter (it was wrongly annotated ``str``).

    Returns 415 if the request is not JSON, 406 if the client will not
    accept JSON, 400 on a malformed body or database error, and 200 with
    the persisted vlan on success.
    """
    session = current_app.config['SWITCHMNG_DB_CONNECTION'].Session()

    # Check request
    if request.content_type != 'application/json':
        return error_415(message = 'Expected Content-Type to be application/json')
    if not request.accept_mimetypes.accept_json:
        return error_406(message = 'Content-Type application/json is not accepted by client')
    try:
        req = request.json
        if not isinstance(req, dict):
            # Valid JSON but not an object (e.g. a list or a scalar).
            raise ValueError()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; JSON parse errors still map to a 400.
        return error_400(message = 'Request is not a valid json object')

    # Set in database
    try:
        vl = database.set_vlan(session, resource_id = resource_id, **req)
    except Exception as e:
        # `Exception` instead of `BaseException`: process-control
        # exceptions must not be converted into HTTP 400 responses.
        return error_400(message = str(e))

    return { 'status': 200,
             'data': vl.jsonify() }, 200
| 36.93299
| 122
| 0.667132
| 892
| 7,165
| 5.186099
| 0.096413
| 0.077821
| 0.06658
| 0.078686
| 0.874838
| 0.841548
| 0.822093
| 0.785992
| 0.769563
| 0.769563
| 0
| 0.022843
| 0.230147
| 7,165
| 193
| 123
| 37.124352
| 0.815809
| 0.02917
| 0
| 0.731544
| 0
| 0
| 0.231955
| 0.068578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04698
| false
| 0
| 0.040268
| 0
| 0.322148
| 0.006711
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9cd3066792449ab3d3e68612fab48e979ac2945
| 33,153
|
py
|
Python
|
intel_ecs_wiki_search_backup.py
|
kundansaha82/WikiChatbot
|
24ad28f0964ad28cbe8fca96b40bd8974fe5b7ea
|
[
"MIT"
] | null | null | null |
intel_ecs_wiki_search_backup.py
|
kundansaha82/WikiChatbot
|
24ad28f0964ad28cbe8fca96b40bd8974fe5b7ea
|
[
"MIT"
] | null | null | null |
intel_ecs_wiki_search_backup.py
|
kundansaha82/WikiChatbot
|
24ad28f0964ad28cbe8fca96b40bd8974fe5b7ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 13:25:29 2021
@author: kundankantisaha
"""
from elasticsearch import Elasticsearch
from os import popen
"""import logging"""
import subprocess
from lemma_tokenizer import Splitter as Splitter
from lemma_tokenizer import LemmatizationWithPOSTagger as LemmatizationWithPOSTagger
import json
import lemma_tokenizer
import scrapy
from scrapy import Request
def start_cluster():
    """Launch the local Elasticsearch server from its Windows batch file.

    The process is started in the background and not waited on.
    """
    batch_file = 'C:\\Users\\kundansa\\Downloads\\elasticsearch-7.10.2\\bin\\elasticsearch.bat'
    subprocess.Popen(batch_file)
    """time.sleep(15)"""
def connect_elasticsearch():
    """Open a client connection to Elasticsearch on localhost:9200.

    Prints a success or failure message depending on the ping result and
    returns the client either way.
    """
    client = Elasticsearch([{'host': '127.0.0.1', 'port': '9200'}])
    message = 'Yay Connect' if client.ping() else 'Awww it could not connect!'
    print(message)
    return client
# Module-level state shared between the interactive loop at the bottom of
# this file and the print_content() scrapy callback (set once per hit).
source_link = ""
keywordoriginal = ""
def _dump_matching_section(heads, sibling_tag, filee, lines):
    """Scan *heads* for the element whose cleaned text equals the
    module-global ``keywordoriginal`` and append that section to *filee*.

    If a following sibling of type *sibling_tag* exists, the text up to
    that sibling is dumped as JSON and the array is closed with ``]``;
    otherwise the next five raw text nodes are appended to the *lines*
    accumulator, dumped, and the array is closed with ``...]``.  Only the
    first matching head is processed (the original loops ``break`` after
    a match).  Returns the possibly-grown *lines* accumulator so the
    caller can carry it across sections, as the original code did.
    """
    for head in heads:
        sub = str(head.xpath("text()").extract())
        sub = sub.replace("[", "").replace("'", "").replace("]", "")
        if sub != keywordoriginal:
            continue
        sub_next = str(head.xpath(
            "following-sibling::" + sibling_tag + "[1]//text()").extract())
        sub_next = sub_next.replace("[", "").replace("'", "").replace("]", "")
        if sub_next != "":
            # A closing sibling exists: grab everything up to it.
            line = head.xpath(
                ".//following-sibling::*[count(following-sibling::"
                + sibling_tag + "[1])]//text()").extract()
            json.dump({
                'line': (str(line))
            }, filee
            )
            filee.write(']')
            print(str(line))
        else:
            # No closing sibling: take the next five text nodes instead.
            for i in range(1, 6):
                str_xpath = ".//following::text()[" + str(i) + "]"
                lines = lines + str(head.xpath(str_xpath).extract())
            json.dump({
                'line': (lines)
            }, filee
            )
            filee.write('...]')
            print(str(lines))
        break
    return lines


def print_content(response):
    """Scrapy callback: extract the wiki section matching the searched
    keyword from *response* and append it as JSON to data_file7.json.

    The original hand-unrolled the same scan for every heading/list tag
    (h1..h7, ul, ol, each with a ``/strong`` variant); the per-tag work
    now lives in ``_dump_matching_section``.  Yields the response back to
    Scrapy (this callback is a generator).
    """
    print("Printing content")
    # NOTE(review): hard-coded absolute Windows path — consider making
    # this configurable.
    with open("C:\\Users\\kundansa\\ecswikifin\\ecswikifin\\spiders\\data_file7.json", "a") as filee:
        filee.write('[')
        lines = ""
        title = response.xpath("//head/title/text()").extract()
        print(title)
        content = "//div[@class='wiki-content']"
        # Same tag order as the original unrolled loops: each tag first,
        # then its <strong> child variant.
        for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'ul', 'ol'):
            lines = _dump_matching_section(
                response.xpath(content + "/" + tag), tag, filee, lines)
            lines = _dump_matching_section(
                response.xpath(content + "/" + tag + "/strong"), tag, filee, lines)
    yield response
"""if __name__ == '__main__':
logging.basicConfig(level=logging.ERROR)"""
start_cluster()
es = connect_elasticsearch()
looping_condition = True
"""key_terms = ['backup', 'Security', 'DPDK', 'Driver', 'OVS', 'ADQ','Containers','NVMe over TCP','Memcached','Netperf','Redis','Aerospike'
'RDMA','DDP','DPR','AF_XDP','ECS','Columbiaville','OEM','ETA','Ethernet','Pre-Boot']"""
while looping_condition:
found_terms = list()
input_string = input("Enter your query/ enter '-1' to exit :")
if ("-1" == input_string):
looping_condition = False
continue
lemma_tokenizer.input_string = input_string
splitter = Splitter()
lemmatization_using_pos_tagger = LemmatizationWithPOSTagger()
#step 1 split document into sentence followed by tokenization
tokens = splitter.split(input_string)
#step 2 lemmatization using pos tagger
lemma_pos_token = lemmatization_using_pos_tagger.pos_tag(tokens)
with open('input_tokens.json', 'w') as f:
for lemmaset in lemma_pos_token:
for i in range(len(lemmaset)):
data = {}
data["words"] = []
data["words"].append({
"Original Word": lemmaset[i][0],
"Lemmatized Word": lemmaset[i][1],
"POS Tag": lemmaset[i][2]
})
json.dump(data, f)
string_found_terms = ""
allowed_pos_tags = [["NNP"],["NNS"],["NN"],["VB"],["NNPS"],["CD"]]
for lemmaset in lemma_pos_token:
for i in range(len(lemmaset)):
if lemmaset[i][2] in allowed_pos_tags:
found_terms.append(lemmaset[i][1])
string_found_terms = ' '.join(found_terms)
search_param = {
"query": {
"simple_query_string" : {
"query": string_found_terms,
"fields": ["title", "keyword"],
"default_operator": "and"
}
}
}
res = es.search(index="wikifin", body=search_param)
"""print("%d documents found" % res['hits']['total'])"""
data = [doc for doc in res['hits']['hits']]
resulting_search = ""
for doc in data:
resulting_search = doc['_source']['keyword']
original_search_phrase = doc['_source']['keywordoriginal']
print("")
print("%s" % original_search_phrase)
keywordoriginal = original_search_phrase
lemma_tokenizer.input_string = resulting_search
splitter = Splitter()
lemmatization_using_pos_tagger = LemmatizationWithPOSTagger()
tokens_out = splitter.split(resulting_search)
lemma_pos_token_out = lemmatization_using_pos_tagger.pos_tag(tokens_out)
with open('output_tokens.json', 'w') as f:
for lemmaset_out in lemma_pos_token_out:
for i in range(len(lemmaset_out)):
data = {}
data["words"] = []
data["words"].append({
"Original Word": lemmaset_out[i][0],
"Lemmatized Word": lemmaset_out[i][1],
"POS Tag": lemmaset_out[i][2]
})
json.dump(data, f)
source_link = (doc['_source']['link'])
curl_string1 = 'curl -H "Authorization: Basic S1VOREFOU0E6QW5pS3VuQDk5MDM=" -X GET -H "Content-Type: application/json" '+source_link
response = Request.from_curl(curl_string1, callback = print_content)
print("%s" % source_link)
| 45.229195
| 150
| 0.367569
| 2,646
| 33,153
| 4.484127
| 0.086546
| 0.085546
| 0.054614
| 0.063717
| 0.819132
| 0.812389
| 0.80531
| 0.780784
| 0.780784
| 0.772693
| 0
| 0.00974
| 0.489006
| 33,153
| 732
| 151
| 45.290984
| 0.690632
| 0.00546
| 0
| 0.791225
| 0
| 0.003026
| 0.122042
| 0.092058
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004539
| false
| 0
| 0.013616
| 0
| 0.019667
| 0.068079
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9cd5bcd59d123efd59e0f06bd10aa7f7a7aa6cb
| 12,034
|
py
|
Python
|
photix/fields.py
|
dimitri-yatsenko/photix
|
906e5637c8e8172e1f57c3a6f04b55db355effb2
|
[
"MIT"
] | null | null | null |
photix/fields.py
|
dimitri-yatsenko/photix
|
906e5637c8e8172e1f57c3a6f04b55db355effb2
|
[
"MIT"
] | null | null | null |
photix/fields.py
|
dimitri-yatsenko/photix
|
906e5637c8e8172e1f57c3a6f04b55db355effb2
|
[
"MIT"
] | null | null | null |
import numpy as np
import datajoint as dj
from .tracer import SpaceTracer
from matplotlib import pyplot as plt
# DataJoint schema that all tables in this module are declared under.
schema = dj.schema('photixxx')
@schema
class DSim(dj.Lookup):
    # Detector field specification lookup table.  The body is a
    # declarative DataJoint spec: `definition` is parsed by the @schema
    # decorator and `contents` pre-populates the table, so the code is
    # kept byte-identical and only comments/annotations are added.
    definition = """
    # Detector Field Specification
    dsim : int
    ---
    detector_type='one-sided' : varchar(30) # choice in simulation
    detector_width=10.00: decimal(5,2) # (um) along x-axis
    detector_height=10.00: decimal(5,2) # (um) along y-axis
    anisotropy = 0.88 : float # factor in the Henyey-Greenstein formula
    absorption_length: float # (um) average travel path before a absoprtion event
    scatter_length : float # (um) average travel path before a scatter event
    volume_dimx = 1000 : int unsigned # (voxels)
    volume_dimy = 1000 : int unsigned # (voxels)
    volume_dimz = 1000 : int unsigned # (voxels)
    pitch = 2.2 : float # (um) spatial sampling period of the model volume
    """

    # Pre-populated detector variants; dsim 14/18 use shorter absorption
    # lengths than the rest.
    contents = [
        dict(dsim=0, detector_type='one-sided', detector_height=50, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=1, detector_type='one-sided', detector_height=20, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=2, detector_type='narrowed2', detector_height=20, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=4, detector_type='narrowed4', detector_height=20, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=8, detector_type='narrowed8', detector_height=20, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=10, detector_type='narrowed10', detector_height=20, scatter_length=50, absorption_length=1.5e4),
        dict(dsim=14, detector_type='narrowed4', detector_height=20, scatter_length=50, absorption_length=1500),
        dict(dsim=18, detector_type='narrowed8', detector_height=20, scatter_length=50, absorption_length=500)]

    def make_volume(self, hops: int = 100_000):
        """Trace *hops* photon hops for this detector spec and return the
        populated SpaceTracer, with the volume scaled by the emitter area.

        Must be called on a restriction with exactly one row (fetch1).
        """
        spec = self.fetch1()
        # Forward only the tracer-relevant keys from the fetched row.
        kwargs = {k: spec[k] for k in spec if k in {
            'pitch', 'anisotropy', 'scatter_length', 'absorption_length', 'detector_type'}}
        kwargs.update(
            dims=tuple(spec[k] for k in ('volume_dimx', 'volume_dimy', 'volume_dimz')),
            emitter_spread='spherical',
            emitter_size=(float(spec['detector_width']), float(spec['detector_height']), 0))
        space = SpaceTracer(**kwargs)
        space.run(hops=hops)
        space.volume *= space.emitter_area
        return space
@schema
class ESim(dj.Lookup):
definition = """
# Emission Field Specification
esim : int
---
beam_compression : float
y_steer : float # the steer angle in the plane of the shank
emitter_width=10.00: decimal(5,2) # (um) along x-axis
emitter_height=10.00: decimal(5,2) # (um) along y-axis
anisotropy = 0.88 : float # factor in the Henyey-Greenstein formula
absorption_length: float # (um) average travel path before a absoprtion event
scatter_length: float # (um) average travel path before a scatter event
volume_dimx = 1000 : int unsigned # (voxels)
volume_dimy = 1000 : int unsigned # (voxels)
volume_dimz = 1000 : int unsigned # (voxels)
beam_xy_aspect = 1.0 : float # compression of y. E.g. 2.0 means that y is compressed by factor of 2
pitch = 2.2 : float # (um) spatial sampling period of the model volume
"""
contents = [
dict(esim=0, beam_compression=1.0, y_steer=0.0, scatter_length=50, absorption_length=1.5e4),
dict(esim=10, beam_compression=1 / 4, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=11, beam_compression=1 / 4, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=12, beam_compression=1 / 4, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=13, beam_compression=1 / 4, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=14, beam_compression=1 / 4, y_steer=0, scatter_length=50, absorption_length=1.5e4),
dict(esim=15, beam_compression=1 / 4, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=16, beam_compression=1 / 4, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=17, beam_compression=1 / 4, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=18, beam_compression=1 / 4, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=20, beam_compression=1 / 3, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=21, beam_compression=1 / 3, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=22, beam_compression=1 / 3, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=23, beam_compression=1 / 3, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=24, beam_compression=1 / 3, y_steer=0, scatter_length=50, absorption_length=1.5e4),
dict(esim=25, beam_compression=1 / 3, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=26, beam_compression=1 / 3, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=27, beam_compression=1 / 3, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=28, beam_compression=1 / 3, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=30, beam_compression=1 / 6, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=31, beam_compression=1 / 6, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=32, beam_compression=1 / 6, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=33, beam_compression=1 / 6, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=34, beam_compression=1 / 6, y_steer=0, scatter_length=50, absorption_length=1.5e4),
dict(esim=35, beam_compression=1 / 6, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=36, beam_compression=1 / 6, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=37, beam_compression=1 / 6, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=38, beam_compression=1 / 6, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=40, beam_compression=1 / 12, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1.5e4),
dict(esim=41, beam_compression=1 / 12, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=42, beam_compression=1 / 12, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=43, beam_compression=1 / 12, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=44, beam_compression=1 / 12, y_steer=0, scatter_length=50, absorption_length=1.5e4),
dict(esim=45, beam_compression=1 / 12, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1.5e4),
dict(esim=46, beam_compression=1 / 12, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1.5e4),
dict(esim=47, beam_compression=1 / 12, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1.5e4),
dict(esim=48, beam_compression=1 / 12, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1.5e4),
# tissue tanning
dict(esim=130, beam_compression=1 / 6, y_steer=-np.pi / 3, scatter_length=50, absorption_length=500),
dict(esim=131, beam_compression=1 / 6, y_steer=-np.pi / 4, scatter_length=50, absorption_length=500),
dict(esim=132, beam_compression=1 / 6, y_steer=-np.pi / 6, scatter_length=50, absorption_length=500),
dict(esim=133, beam_compression=1 / 6, y_steer=-np.pi / 12, scatter_length=50, absorption_length=500),
dict(esim=134, beam_compression=1 / 6, y_steer=0, scatter_length=50, absorption_length=500),
dict(esim=135, beam_compression=1 / 6, y_steer=+np.pi / 12, scatter_length=50, absorption_length=500),
dict(esim=136, beam_compression=1 / 6, y_steer=+np.pi / 6, scatter_length=50, absorption_length=500),
dict(esim=137, beam_compression=1 / 6, y_steer=+np.pi / 4, scatter_length=50, absorption_length=500),
dict(esim=138, beam_compression=1 / 6, y_steer=+np.pi / 3, scatter_length=50, absorption_length=500),
dict(esim=140, beam_compression=1 / 12, y_steer=-np.pi / 3, scatter_length=50, absorption_length=500),
dict(esim=141, beam_compression=1 / 12, y_steer=-np.pi / 4, scatter_length=50, absorption_length=500),
dict(esim=142, beam_compression=1 / 12, y_steer=-np.pi / 6, scatter_length=50, absorption_length=500),
dict(esim=143, beam_compression=1 / 12, y_steer=-np.pi / 12, scatter_length=50, absorption_length=500),
dict(esim=144, beam_compression=1 / 12, y_steer=0, scatter_length=50, absorption_length=500),
dict(esim=145, beam_compression=1 / 12, y_steer=+np.pi / 12, scatter_length=50, absorption_length=500),
dict(esim=146, beam_compression=1 / 12, y_steer=+np.pi / 6, scatter_length=50, absorption_length=500),
dict(esim=147, beam_compression=1 / 12, y_steer=+np.pi / 4, scatter_length=50, absorption_length=500),
dict(esim=148, beam_compression=1 / 12, y_steer=+np.pi / 3, scatter_length=50, absorption_length=500),
dict(esim=320, beam_compression=1 / 3, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1500),
dict(esim=321, beam_compression=1 / 3, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1500),
dict(esim=322, beam_compression=1 / 3, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1500),
dict(esim=323, beam_compression=1 / 3, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1500),
dict(esim=324, beam_compression=1 / 3, y_steer=0, scatter_length=50, absorption_length=1500),
dict(esim=325, beam_compression=1 / 3, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1500),
dict(esim=326, beam_compression=1 / 3, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1500),
dict(esim=327, beam_compression=1 / 3, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1500),
dict(esim=328, beam_compression=1 / 3, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1500),
# tissue tanning
dict(esim=330, beam_compression=1 / 6, y_steer=-np.pi / 3, scatter_length=50, absorption_length=1500),
dict(esim=331, beam_compression=1 / 6, y_steer=-np.pi / 4, scatter_length=50, absorption_length=1500),
dict(esim=332, beam_compression=1 / 6, y_steer=-np.pi / 6, scatter_length=50, absorption_length=1500),
dict(esim=333, beam_compression=1 / 6, y_steer=-np.pi / 12, scatter_length=50, absorption_length=1500),
dict(esim=334, beam_compression=1 / 6, y_steer=0, scatter_length=50, absorption_length=1500),
dict(esim=335, beam_compression=1 / 6, y_steer=+np.pi / 12, scatter_length=50, absorption_length=1500),
dict(esim=336, beam_compression=1 / 6, y_steer=+np.pi / 6, scatter_length=50, absorption_length=1500),
dict(esim=337, beam_compression=1 / 6, y_steer=+np.pi / 4, scatter_length=50, absorption_length=1500),
dict(esim=338, beam_compression=1 / 6, y_steer=+np.pi / 3, scatter_length=50, absorption_length=1500),]
def make_volume(self, hops=100_000):
    """Build and run a photon-transport simulation for this table entry.

    Fetches the single parameter record for the current restriction via
    ``self.fetch1()`` (presumably a DataJoint table — confirm), maps the
    optical parameters onto ``SpaceTracer`` keyword arguments, runs the
    tracer for ``hops`` photon hops, and returns the tracer object.

    Parameters
    ----------
    hops : int
        Number of hops to simulate per run (default 100_000).

    Returns
    -------
    SpaceTracer
        The tracer object after ``run`` has completed.
    """
    spec = self.fetch1()
    # Forward only the optical parameters that SpaceTracer accepts directly.
    kwargs = {k: spec[k] for k in spec if k in {
        'pitch', 'anisotropy', 'scatter_length',
        'y_steer', 'beam_compression', 'beam_xy_aspect',
        'absorption_length'}}
    # Repack the volume dimensions and emitter geometry into the tuple
    # forms SpaceTracer expects; the emitter's third (depth) extent is 0.
    kwargs.update(
        dims=tuple(spec[k] for k in ('volume_dimx', 'volume_dimy', 'volume_dimz')),
        emitter_size=(float(spec['emitter_width']), float(spec['emitter_height']), 0))
    space = SpaceTracer(**kwargs)
    space.run(hops=hops)
    return space
| 68.765714
| 114
| 0.686804
| 1,867
| 12,034
| 4.228709
| 0.108195
| 0.172261
| 0.153895
| 0.256491
| 0.872831
| 0.872831
| 0.863458
| 0.862445
| 0.862445
| 0.845598
| 0
| 0.098334
| 0.176915
| 12,034
| 174
| 115
| 69.16092
| 0.698738
| 0.00241
| 0
| 0.27027
| 0
| 0.006757
| 0.159653
| 0.007416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.027027
| 0
| 0.094595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e032f370f72ea3b74646dc41ef5cd3a72888786d
| 70,459
|
py
|
Python
|
decision trees/python/monkdata.py
|
Manu-Fraile/Machine-Learning
|
7428a594b07c23b6b4326ad3f80b11860ac8d507
|
[
"MIT"
] | 1
|
2020-12-02T18:48:32.000Z
|
2020-12-02T18:48:32.000Z
|
decision trees/python/monkdata.py
|
Manu-Fraile/Machine-Learning
|
7428a594b07c23b6b4326ad3f80b11860ac8d507
|
[
"MIT"
] | null | null | null |
decision trees/python/monkdata.py
|
Manu-Fraile/Machine-Learning
|
7428a594b07c23b6b4326ad3f80b11860ac8d507
|
[
"MIT"
] | null | null | null |
class Sample:
    """Representation of a single labelled data sample.

    Attributes:
        positive: bool class label (True = positive example).
        attribute: dict mapping each attribute label to this sample's value.
        identity: integer identifier of the sample within the full
            enumeration of the attribute space.
    """

    def __init__(self, positive, values, identity, attrs=None):
        """Create a sample.

        Args:
            positive: class label of the sample.
            values: attribute values, in the same order as the labels.
            identity: integer id of the sample.
            attrs: optional iterable of attribute labels; defaults to the
                module-level ``attributes`` tuple defined below.
        """
        self.positive = positive
        # Pair each attribute label with the corresponding value.
        self.attribute = dict(zip(attributes if attrs is None else attrs, values))
        self.identity = identity
class Attribute:
    """A named attribute together with the tuple of values it may take."""

    def __init__(self, name, values):
        # Keep the label and its admissible values exactly as given.
        self.name = name
        self.values = values

    def __repr__(self):
        # An attribute prints as its bare label, e.g. ``A3``.
        return self.name


# The six attributes of the MONK datasets and their value domains.
attributes = tuple(
    Attribute(label, domain)
    for label, domain in (
        ('A1', (1, 2, 3)),
        ('A2', (1, 2, 3)),
        ('A3', (1, 2)),
        ('A4', (1, 2, 3)),
        ('A5', (1, 2, 3, 4)),
        ('A6', (1, 2)),
    )
)
# MONK-1 training set: 124 labelled samples drawn from the 432-point
# attribute space; each sample's identity indexes into the full enumeration
# (see monk1test). Every sample listed here satisfies
#   positive  <=>  (A1 == A2) or (A5 == 1).
monk1 = (
Sample(True, (1, 1, 1, 1, 3, 1), 5),
Sample(True, (1, 1, 1, 1, 3, 2), 6),
Sample(True, (1, 1, 1, 3, 2, 1), 19),
Sample(True, (1, 1, 1, 3, 3, 2), 22),
Sample(True, (1, 1, 2, 1, 2, 1), 27),
Sample(True, (1, 1, 2, 1, 2, 2), 28),
Sample(True, (1, 1, 2, 2, 3, 1), 37),
Sample(True, (1, 1, 2, 2, 4, 1), 39),
Sample(True, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 2, 1, 1, 1, 2), 50),
Sample(False, (1, 2, 1, 1, 2, 1), 51),
Sample(False, (1, 2, 1, 1, 3, 1), 53),
Sample(False, (1, 2, 1, 1, 4, 2), 56),
Sample(True, (1, 2, 1, 2, 1, 1), 57),
Sample(False, (1, 2, 1, 2, 3, 1), 61),
Sample(False, (1, 2, 1, 2, 3, 2), 62),
Sample(False, (1, 2, 1, 2, 4, 2), 64),
Sample(False, (1, 2, 1, 3, 2, 1), 67),
Sample(False, (1, 2, 1, 3, 4, 2), 72),
Sample(False, (1, 2, 2, 1, 2, 2), 76),
Sample(False, (1, 2, 2, 2, 3, 2), 86),
Sample(False, (1, 2, 2, 2, 4, 1), 87),
Sample(False, (1, 2, 2, 2, 4, 2), 88),
Sample(False, (1, 2, 2, 3, 2, 2), 92),
Sample(False, (1, 2, 2, 3, 3, 1), 93),
Sample(False, (1, 2, 2, 3, 3, 2), 94),
Sample(False, (1, 3, 1, 1, 2, 1), 99),
Sample(False, (1, 3, 1, 1, 4, 1), 103),
Sample(False, (1, 3, 1, 2, 2, 1), 107),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(True, (1, 3, 1, 3, 1, 2), 114),
Sample(False, (1, 3, 1, 3, 2, 2), 116),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(False, (1, 3, 1, 3, 4, 1), 119),
Sample(False, (1, 3, 1, 3, 4, 2), 120),
Sample(False, (1, 3, 2, 1, 2, 2), 124),
Sample(True, (1, 3, 2, 2, 1, 2), 130),
Sample(False, (1, 3, 2, 2, 2, 2), 132),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 1), 135),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(True, (1, 3, 2, 3, 1, 1), 137),
Sample(False, (1, 3, 2, 3, 2, 1), 139),
Sample(False, (1, 3, 2, 3, 4, 1), 143),
Sample(False, (1, 3, 2, 3, 4, 2), 144),
Sample(False, (2, 1, 1, 1, 3, 1), 149),
Sample(False, (2, 1, 1, 1, 3, 2), 150),
Sample(True, (2, 1, 1, 2, 1, 1), 153),
Sample(True, (2, 1, 1, 2, 1, 2), 154),
Sample(False, (2, 1, 1, 2, 2, 2), 156),
Sample(False, (2, 1, 1, 2, 3, 1), 157),
Sample(False, (2, 1, 1, 2, 4, 1), 159),
Sample(False, (2, 1, 1, 2, 4, 2), 160),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(False, (2, 1, 2, 1, 2, 2), 172),
Sample(False, (2, 1, 2, 1, 3, 1), 173),
Sample(False, (2, 1, 2, 1, 4, 2), 176),
Sample(False, (2, 1, 2, 2, 3, 1), 181),
Sample(False, (2, 1, 2, 2, 4, 2), 184),
Sample(False, (2, 1, 2, 3, 2, 2), 188),
Sample(False, (2, 1, 2, 3, 4, 1), 191),
Sample(True, (2, 2, 1, 1, 2, 1), 195),
Sample(True, (2, 2, 1, 1, 2, 2), 196),
Sample(True, (2, 2, 1, 1, 3, 1), 197),
Sample(True, (2, 2, 1, 2, 3, 2), 206),
Sample(True, (2, 2, 1, 3, 1, 1), 209),
Sample(True, (2, 2, 1, 3, 1, 2), 210),
Sample(True, (2, 2, 1, 3, 2, 2), 212),
Sample(True, (2, 2, 1, 3, 3, 2), 214),
Sample(True, (2, 2, 1, 3, 4, 2), 216),
Sample(True, (2, 2, 2, 1, 1, 1), 217),
Sample(True, (2, 2, 2, 1, 3, 2), 222),
Sample(True, (2, 2, 2, 1, 4, 1), 223),
Sample(True, (2, 2, 2, 1, 4, 2), 224),
Sample(True, (2, 2, 2, 2, 2, 1), 227),
Sample(True, (2, 2, 2, 3, 4, 1), 239),
Sample(True, (2, 3, 1, 1, 1, 1), 241),
Sample(True, (2, 3, 1, 2, 1, 1), 249),
Sample(False, (2, 3, 1, 2, 3, 1), 253),
Sample(True, (2, 3, 1, 3, 1, 2), 258),
Sample(False, (2, 3, 1, 3, 3, 1), 261),
Sample(False, (2, 3, 1, 3, 4, 2), 264),
Sample(False, (2, 3, 2, 1, 3, 2), 270),
Sample(True, (2, 3, 2, 2, 1, 1), 273),
Sample(True, (2, 3, 2, 2, 1, 2), 274),
Sample(False, (2, 3, 2, 2, 2, 1), 275),
Sample(False, (2, 3, 2, 3, 3, 2), 286),
Sample(True, (3, 1, 1, 1, 1, 1), 289),
Sample(True, (3, 1, 1, 1, 1, 2), 290),
Sample(True, (3, 1, 1, 2, 1, 1), 297),
Sample(False, (3, 1, 1, 2, 2, 2), 300),
Sample(False, (3, 1, 1, 3, 2, 2), 308),
Sample(True, (3, 1, 2, 1, 1, 1), 313),
Sample(False, (3, 1, 2, 1, 2, 2), 316),
Sample(False, (3, 1, 2, 2, 2, 2), 324),
Sample(False, (3, 1, 2, 2, 3, 2), 326),
Sample(False, (3, 1, 2, 3, 2, 2), 332),
Sample(True, (3, 2, 1, 1, 1, 1), 337),
Sample(False, (3, 2, 1, 1, 4, 2), 344),
Sample(True, (3, 2, 1, 2, 1, 2), 346),
Sample(False, (3, 2, 1, 2, 4, 2), 352),
Sample(True, (3, 2, 2, 1, 1, 1), 361),
Sample(True, (3, 2, 2, 1, 1, 2), 362),
Sample(False, (3, 2, 2, 1, 3, 2), 366),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(False, (3, 2, 2, 3, 2, 1), 379),
Sample(False, (3, 2, 2, 3, 4, 1), 383),
Sample(True, (3, 3, 1, 1, 1, 1), 385),
Sample(True, (3, 3, 1, 1, 2, 1), 387),
Sample(True, (3, 3, 1, 1, 4, 2), 392),
Sample(True, (3, 3, 1, 2, 3, 2), 398),
Sample(True, (3, 3, 1, 2, 4, 2), 400),
Sample(True, (3, 3, 1, 3, 1, 2), 402),
Sample(True, (3, 3, 1, 3, 2, 1), 403),
Sample(True, (3, 3, 1, 3, 2, 2), 404),
Sample(True, (3, 3, 1, 3, 4, 2), 408),
Sample(True, (3, 3, 2, 1, 1, 1), 409),
Sample(True, (3, 3, 2, 1, 3, 2), 414),
Sample(True, (3, 3, 2, 1, 4, 1), 415),
Sample(True, (3, 3, 2, 1, 4, 2), 416),
Sample(True, (3, 3, 2, 3, 1, 2), 426),
Sample(True, (3, 3, 2, 3, 2, 2), 428),
Sample(True, (3, 3, 2, 3, 3, 2), 430),
Sample(True, (3, 3, 2, 3, 4, 2), 432))
# MONK-1 test set: the complete enumeration of all 432 attribute
# combinations (identities 1..432, A6 varying fastest), labelled by the
# MONK-1 target concept: positive <=> (A1 == A2) or (A5 == 1).
monk1test = (
Sample(True, (1, 1, 1, 1, 1, 1), 1),
Sample(True, (1, 1, 1, 1, 1, 2), 2),
Sample(True, (1, 1, 1, 1, 2, 1), 3),
Sample(True, (1, 1, 1, 1, 2, 2), 4),
Sample(True, (1, 1, 1, 1, 3, 1), 5),
Sample(True, (1, 1, 1, 1, 3, 2), 6),
Sample(True, (1, 1, 1, 1, 4, 1), 7),
Sample(True, (1, 1, 1, 1, 4, 2), 8),
Sample(True, (1, 1, 1, 2, 1, 1), 9),
Sample(True, (1, 1, 1, 2, 1, 2), 10),
Sample(True, (1, 1, 1, 2, 2, 1), 11),
Sample(True, (1, 1, 1, 2, 2, 2), 12),
Sample(True, (1, 1, 1, 2, 3, 1), 13),
Sample(True, (1, 1, 1, 2, 3, 2), 14),
Sample(True, (1, 1, 1, 2, 4, 1), 15),
Sample(True, (1, 1, 1, 2, 4, 2), 16),
Sample(True, (1, 1, 1, 3, 1, 1), 17),
Sample(True, (1, 1, 1, 3, 1, 2), 18),
Sample(True, (1, 1, 1, 3, 2, 1), 19),
Sample(True, (1, 1, 1, 3, 2, 2), 20),
Sample(True, (1, 1, 1, 3, 3, 1), 21),
Sample(True, (1, 1, 1, 3, 3, 2), 22),
Sample(True, (1, 1, 1, 3, 4, 1), 23),
Sample(True, (1, 1, 1, 3, 4, 2), 24),
Sample(True, (1, 1, 2, 1, 1, 1), 25),
Sample(True, (1, 1, 2, 1, 1, 2), 26),
Sample(True, (1, 1, 2, 1, 2, 1), 27),
Sample(True, (1, 1, 2, 1, 2, 2), 28),
Sample(True, (1, 1, 2, 1, 3, 1), 29),
Sample(True, (1, 1, 2, 1, 3, 2), 30),
Sample(True, (1, 1, 2, 1, 4, 1), 31),
Sample(True, (1, 1, 2, 1, 4, 2), 32),
Sample(True, (1, 1, 2, 2, 1, 1), 33),
Sample(True, (1, 1, 2, 2, 1, 2), 34),
Sample(True, (1, 1, 2, 2, 2, 1), 35),
Sample(True, (1, 1, 2, 2, 2, 2), 36),
Sample(True, (1, 1, 2, 2, 3, 1), 37),
Sample(True, (1, 1, 2, 2, 3, 2), 38),
Sample(True, (1, 1, 2, 2, 4, 1), 39),
Sample(True, (1, 1, 2, 2, 4, 2), 40),
Sample(True, (1, 1, 2, 3, 1, 1), 41),
Sample(True, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 1, 2, 3, 2, 1), 43),
Sample(True, (1, 1, 2, 3, 2, 2), 44),
Sample(True, (1, 1, 2, 3, 3, 1), 45),
Sample(True, (1, 1, 2, 3, 3, 2), 46),
Sample(True, (1, 1, 2, 3, 4, 1), 47),
Sample(True, (1, 1, 2, 3, 4, 2), 48),
Sample(True, (1, 2, 1, 1, 1, 1), 49),
Sample(True, (1, 2, 1, 1, 1, 2), 50),
Sample(False, (1, 2, 1, 1, 2, 1), 51),
Sample(False, (1, 2, 1, 1, 2, 2), 52),
Sample(False, (1, 2, 1, 1, 3, 1), 53),
Sample(False, (1, 2, 1, 1, 3, 2), 54),
Sample(False, (1, 2, 1, 1, 4, 1), 55),
Sample(False, (1, 2, 1, 1, 4, 2), 56),
Sample(True, (1, 2, 1, 2, 1, 1), 57),
Sample(True, (1, 2, 1, 2, 1, 2), 58),
Sample(False, (1, 2, 1, 2, 2, 1), 59),
Sample(False, (1, 2, 1, 2, 2, 2), 60),
Sample(False, (1, 2, 1, 2, 3, 1), 61),
Sample(False, (1, 2, 1, 2, 3, 2), 62),
Sample(False, (1, 2, 1, 2, 4, 1), 63),
Sample(False, (1, 2, 1, 2, 4, 2), 64),
Sample(True, (1, 2, 1, 3, 1, 1), 65),
Sample(True, (1, 2, 1, 3, 1, 2), 66),
Sample(False, (1, 2, 1, 3, 2, 1), 67),
Sample(False, (1, 2, 1, 3, 2, 2), 68),
Sample(False, (1, 2, 1, 3, 3, 1), 69),
Sample(False, (1, 2, 1, 3, 3, 2), 70),
Sample(False, (1, 2, 1, 3, 4, 1), 71),
Sample(False, (1, 2, 1, 3, 4, 2), 72),
Sample(True, (1, 2, 2, 1, 1, 1), 73),
Sample(True, (1, 2, 2, 1, 1, 2), 74),
Sample(False, (1, 2, 2, 1, 2, 1), 75),
Sample(False, (1, 2, 2, 1, 2, 2), 76),
Sample(False, (1, 2, 2, 1, 3, 1), 77),
Sample(False, (1, 2, 2, 1, 3, 2), 78),
Sample(False, (1, 2, 2, 1, 4, 1), 79),
Sample(False, (1, 2, 2, 1, 4, 2), 80),
Sample(True, (1, 2, 2, 2, 1, 1), 81),
Sample(True, (1, 2, 2, 2, 1, 2), 82),
Sample(False, (1, 2, 2, 2, 2, 1), 83),
Sample(False, (1, 2, 2, 2, 2, 2), 84),
Sample(False, (1, 2, 2, 2, 3, 1), 85),
Sample(False, (1, 2, 2, 2, 3, 2), 86),
Sample(False, (1, 2, 2, 2, 4, 1), 87),
Sample(False, (1, 2, 2, 2, 4, 2), 88),
Sample(True, (1, 2, 2, 3, 1, 1), 89),
Sample(True, (1, 2, 2, 3, 1, 2), 90),
Sample(False, (1, 2, 2, 3, 2, 1), 91),
Sample(False, (1, 2, 2, 3, 2, 2), 92),
Sample(False, (1, 2, 2, 3, 3, 1), 93),
Sample(False, (1, 2, 2, 3, 3, 2), 94),
Sample(False, (1, 2, 2, 3, 4, 1), 95),
Sample(False, (1, 2, 2, 3, 4, 2), 96),
Sample(True, (1, 3, 1, 1, 1, 1), 97),
Sample(True, (1, 3, 1, 1, 1, 2), 98),
Sample(False, (1, 3, 1, 1, 2, 1), 99),
Sample(False, (1, 3, 1, 1, 2, 2), 100),
Sample(False, (1, 3, 1, 1, 3, 1), 101),
Sample(False, (1, 3, 1, 1, 3, 2), 102),
Sample(False, (1, 3, 1, 1, 4, 1), 103),
Sample(False, (1, 3, 1, 1, 4, 2), 104),
Sample(True, (1, 3, 1, 2, 1, 1), 105),
Sample(True, (1, 3, 1, 2, 1, 2), 106),
Sample(False, (1, 3, 1, 2, 2, 1), 107),
Sample(False, (1, 3, 1, 2, 2, 2), 108),
Sample(False, (1, 3, 1, 2, 3, 1), 109),
Sample(False, (1, 3, 1, 2, 3, 2), 110),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(False, (1, 3, 1, 2, 4, 2), 112),
Sample(True, (1, 3, 1, 3, 1, 1), 113),
Sample(True, (1, 3, 1, 3, 1, 2), 114),
Sample(False, (1, 3, 1, 3, 2, 1), 115),
Sample(False, (1, 3, 1, 3, 2, 2), 116),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(False, (1, 3, 1, 3, 3, 2), 118),
Sample(False, (1, 3, 1, 3, 4, 1), 119),
Sample(False, (1, 3, 1, 3, 4, 2), 120),
Sample(True, (1, 3, 2, 1, 1, 1), 121),
Sample(True, (1, 3, 2, 1, 1, 2), 122),
Sample(False, (1, 3, 2, 1, 2, 1), 123),
Sample(False, (1, 3, 2, 1, 2, 2), 124),
Sample(False, (1, 3, 2, 1, 3, 1), 125),
Sample(False, (1, 3, 2, 1, 3, 2), 126),
Sample(False, (1, 3, 2, 1, 4, 1), 127),
Sample(False, (1, 3, 2, 1, 4, 2), 128),
Sample(True, (1, 3, 2, 2, 1, 1), 129),
Sample(True, (1, 3, 2, 2, 1, 2), 130),
Sample(False, (1, 3, 2, 2, 2, 1), 131),
Sample(False, (1, 3, 2, 2, 2, 2), 132),
Sample(False, (1, 3, 2, 2, 3, 1), 133),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 1), 135),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(True, (1, 3, 2, 3, 1, 1), 137),
Sample(True, (1, 3, 2, 3, 1, 2), 138),
Sample(False, (1, 3, 2, 3, 2, 1), 139),
Sample(False, (1, 3, 2, 3, 2, 2), 140),
Sample(False, (1, 3, 2, 3, 3, 1), 141),
Sample(False, (1, 3, 2, 3, 3, 2), 142),
Sample(False, (1, 3, 2, 3, 4, 1), 143),
Sample(False, (1, 3, 2, 3, 4, 2), 144),
Sample(True, (2, 1, 1, 1, 1, 1), 145),
Sample(True, (2, 1, 1, 1, 1, 2), 146),
Sample(False, (2, 1, 1, 1, 2, 1), 147),
Sample(False, (2, 1, 1, 1, 2, 2), 148),
Sample(False, (2, 1, 1, 1, 3, 1), 149),
Sample(False, (2, 1, 1, 1, 3, 2), 150),
Sample(False, (2, 1, 1, 1, 4, 1), 151),
Sample(False, (2, 1, 1, 1, 4, 2), 152),
Sample(True, (2, 1, 1, 2, 1, 1), 153),
Sample(True, (2, 1, 1, 2, 1, 2), 154),
Sample(False, (2, 1, 1, 2, 2, 1), 155),
Sample(False, (2, 1, 1, 2, 2, 2), 156),
Sample(False, (2, 1, 1, 2, 3, 1), 157),
Sample(False, (2, 1, 1, 2, 3, 2), 158),
Sample(False, (2, 1, 1, 2, 4, 1), 159),
Sample(False, (2, 1, 1, 2, 4, 2), 160),
Sample(True, (2, 1, 1, 3, 1, 1), 161),
Sample(True, (2, 1, 1, 3, 1, 2), 162),
Sample(False, (2, 1, 1, 3, 2, 1), 163),
Sample(False, (2, 1, 1, 3, 2, 2), 164),
Sample(False, (2, 1, 1, 3, 3, 1), 165),
Sample(False, (2, 1, 1, 3, 3, 2), 166),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(False, (2, 1, 1, 3, 4, 2), 168),
Sample(True, (2, 1, 2, 1, 1, 1), 169),
Sample(True, (2, 1, 2, 1, 1, 2), 170),
Sample(False, (2, 1, 2, 1, 2, 1), 171),
Sample(False, (2, 1, 2, 1, 2, 2), 172),
Sample(False, (2, 1, 2, 1, 3, 1), 173),
Sample(False, (2, 1, 2, 1, 3, 2), 174),
Sample(False, (2, 1, 2, 1, 4, 1), 175),
Sample(False, (2, 1, 2, 1, 4, 2), 176),
Sample(True, (2, 1, 2, 2, 1, 1), 177),
Sample(True, (2, 1, 2, 2, 1, 2), 178),
Sample(False, (2, 1, 2, 2, 2, 1), 179),
Sample(False, (2, 1, 2, 2, 2, 2), 180),
Sample(False, (2, 1, 2, 2, 3, 1), 181),
Sample(False, (2, 1, 2, 2, 3, 2), 182),
Sample(False, (2, 1, 2, 2, 4, 1), 183),
Sample(False, (2, 1, 2, 2, 4, 2), 184),
Sample(True, (2, 1, 2, 3, 1, 1), 185),
Sample(True, (2, 1, 2, 3, 1, 2), 186),
Sample(False, (2, 1, 2, 3, 2, 1), 187),
Sample(False, (2, 1, 2, 3, 2, 2), 188),
Sample(False, (2, 1, 2, 3, 3, 1), 189),
Sample(False, (2, 1, 2, 3, 3, 2), 190),
Sample(False, (2, 1, 2, 3, 4, 1), 191),
Sample(False, (2, 1, 2, 3, 4, 2), 192),
Sample(True, (2, 2, 1, 1, 1, 1), 193),
Sample(True, (2, 2, 1, 1, 1, 2), 194),
Sample(True, (2, 2, 1, 1, 2, 1), 195),
Sample(True, (2, 2, 1, 1, 2, 2), 196),
Sample(True, (2, 2, 1, 1, 3, 1), 197),
Sample(True, (2, 2, 1, 1, 3, 2), 198),
Sample(True, (2, 2, 1, 1, 4, 1), 199),
Sample(True, (2, 2, 1, 1, 4, 2), 200),
Sample(True, (2, 2, 1, 2, 1, 1), 201),
Sample(True, (2, 2, 1, 2, 1, 2), 202),
Sample(True, (2, 2, 1, 2, 2, 1), 203),
Sample(True, (2, 2, 1, 2, 2, 2), 204),
Sample(True, (2, 2, 1, 2, 3, 1), 205),
Sample(True, (2, 2, 1, 2, 3, 2), 206),
Sample(True, (2, 2, 1, 2, 4, 1), 207),
Sample(True, (2, 2, 1, 2, 4, 2), 208),
Sample(True, (2, 2, 1, 3, 1, 1), 209),
Sample(True, (2, 2, 1, 3, 1, 2), 210),
Sample(True, (2, 2, 1, 3, 2, 1), 211),
Sample(True, (2, 2, 1, 3, 2, 2), 212),
Sample(True, (2, 2, 1, 3, 3, 1), 213),
Sample(True, (2, 2, 1, 3, 3, 2), 214),
Sample(True, (2, 2, 1, 3, 4, 1), 215),
Sample(True, (2, 2, 1, 3, 4, 2), 216),
Sample(True, (2, 2, 2, 1, 1, 1), 217),
Sample(True, (2, 2, 2, 1, 1, 2), 218),
Sample(True, (2, 2, 2, 1, 2, 1), 219),
Sample(True, (2, 2, 2, 1, 2, 2), 220),
Sample(True, (2, 2, 2, 1, 3, 1), 221),
Sample(True, (2, 2, 2, 1, 3, 2), 222),
Sample(True, (2, 2, 2, 1, 4, 1), 223),
Sample(True, (2, 2, 2, 1, 4, 2), 224),
Sample(True, (2, 2, 2, 2, 1, 1), 225),
Sample(True, (2, 2, 2, 2, 1, 2), 226),
Sample(True, (2, 2, 2, 2, 2, 1), 227),
Sample(True, (2, 2, 2, 2, 2, 2), 228),
Sample(True, (2, 2, 2, 2, 3, 1), 229),
Sample(True, (2, 2, 2, 2, 3, 2), 230),
Sample(True, (2, 2, 2, 2, 4, 1), 231),
Sample(True, (2, 2, 2, 2, 4, 2), 232),
Sample(True, (2, 2, 2, 3, 1, 1), 233),
Sample(True, (2, 2, 2, 3, 1, 2), 234),
Sample(True, (2, 2, 2, 3, 2, 1), 235),
Sample(True, (2, 2, 2, 3, 2, 2), 236),
Sample(True, (2, 2, 2, 3, 3, 1), 237),
Sample(True, (2, 2, 2, 3, 3, 2), 238),
Sample(True, (2, 2, 2, 3, 4, 1), 239),
Sample(True, (2, 2, 2, 3, 4, 2), 240),
Sample(True, (2, 3, 1, 1, 1, 1), 241),
Sample(True, (2, 3, 1, 1, 1, 2), 242),
Sample(False, (2, 3, 1, 1, 2, 1), 243),
Sample(False, (2, 3, 1, 1, 2, 2), 244),
Sample(False, (2, 3, 1, 1, 3, 1), 245),
Sample(False, (2, 3, 1, 1, 3, 2), 246),
Sample(False, (2, 3, 1, 1, 4, 1), 247),
Sample(False, (2, 3, 1, 1, 4, 2), 248),
Sample(True, (2, 3, 1, 2, 1, 1), 249),
Sample(True, (2, 3, 1, 2, 1, 2), 250),
Sample(False, (2, 3, 1, 2, 2, 1), 251),
Sample(False, (2, 3, 1, 2, 2, 2), 252),
Sample(False, (2, 3, 1, 2, 3, 1), 253),
Sample(False, (2, 3, 1, 2, 3, 2), 254),
Sample(False, (2, 3, 1, 2, 4, 1), 255),
Sample(False, (2, 3, 1, 2, 4, 2), 256),
Sample(True, (2, 3, 1, 3, 1, 1), 257),
Sample(True, (2, 3, 1, 3, 1, 2), 258),
Sample(False, (2, 3, 1, 3, 2, 1), 259),
Sample(False, (2, 3, 1, 3, 2, 2), 260),
Sample(False, (2, 3, 1, 3, 3, 1), 261),
Sample(False, (2, 3, 1, 3, 3, 2), 262),
Sample(False, (2, 3, 1, 3, 4, 1), 263),
Sample(False, (2, 3, 1, 3, 4, 2), 264),
Sample(True, (2, 3, 2, 1, 1, 1), 265),
Sample(True, (2, 3, 2, 1, 1, 2), 266),
Sample(False, (2, 3, 2, 1, 2, 1), 267),
Sample(False, (2, 3, 2, 1, 2, 2), 268),
Sample(False, (2, 3, 2, 1, 3, 1), 269),
Sample(False, (2, 3, 2, 1, 3, 2), 270),
Sample(False, (2, 3, 2, 1, 4, 1), 271),
Sample(False, (2, 3, 2, 1, 4, 2), 272),
Sample(True, (2, 3, 2, 2, 1, 1), 273),
Sample(True, (2, 3, 2, 2, 1, 2), 274),
Sample(False, (2, 3, 2, 2, 2, 1), 275),
Sample(False, (2, 3, 2, 2, 2, 2), 276),
Sample(False, (2, 3, 2, 2, 3, 1), 277),
Sample(False, (2, 3, 2, 2, 3, 2), 278),
Sample(False, (2, 3, 2, 2, 4, 1), 279),
Sample(False, (2, 3, 2, 2, 4, 2), 280),
Sample(True, (2, 3, 2, 3, 1, 1), 281),
Sample(True, (2, 3, 2, 3, 1, 2), 282),
Sample(False, (2, 3, 2, 3, 2, 1), 283),
Sample(False, (2, 3, 2, 3, 2, 2), 284),
Sample(False, (2, 3, 2, 3, 3, 1), 285),
Sample(False, (2, 3, 2, 3, 3, 2), 286),
Sample(False, (2, 3, 2, 3, 4, 1), 287),
Sample(False, (2, 3, 2, 3, 4, 2), 288),
Sample(True, (3, 1, 1, 1, 1, 1), 289),
Sample(True, (3, 1, 1, 1, 1, 2), 290),
Sample(False, (3, 1, 1, 1, 2, 1), 291),
Sample(False, (3, 1, 1, 1, 2, 2), 292),
Sample(False, (3, 1, 1, 1, 3, 1), 293),
Sample(False, (3, 1, 1, 1, 3, 2), 294),
Sample(False, (3, 1, 1, 1, 4, 1), 295),
Sample(False, (3, 1, 1, 1, 4, 2), 296),
Sample(True, (3, 1, 1, 2, 1, 1), 297),
Sample(True, (3, 1, 1, 2, 1, 2), 298),
Sample(False, (3, 1, 1, 2, 2, 1), 299),
Sample(False, (3, 1, 1, 2, 2, 2), 300),
Sample(False, (3, 1, 1, 2, 3, 1), 301),
Sample(False, (3, 1, 1, 2, 3, 2), 302),
Sample(False, (3, 1, 1, 2, 4, 1), 303),
Sample(False, (3, 1, 1, 2, 4, 2), 304),
Sample(True, (3, 1, 1, 3, 1, 1), 305),
Sample(True, (3, 1, 1, 3, 1, 2), 306),
Sample(False, (3, 1, 1, 3, 2, 1), 307),
Sample(False, (3, 1, 1, 3, 2, 2), 308),
Sample(False, (3, 1, 1, 3, 3, 1), 309),
Sample(False, (3, 1, 1, 3, 3, 2), 310),
Sample(False, (3, 1, 1, 3, 4, 1), 311),
Sample(False, (3, 1, 1, 3, 4, 2), 312),
Sample(True, (3, 1, 2, 1, 1, 1), 313),
Sample(True, (3, 1, 2, 1, 1, 2), 314),
Sample(False, (3, 1, 2, 1, 2, 1), 315),
Sample(False, (3, 1, 2, 1, 2, 2), 316),
Sample(False, (3, 1, 2, 1, 3, 1), 317),
Sample(False, (3, 1, 2, 1, 3, 2), 318),
Sample(False, (3, 1, 2, 1, 4, 1), 319),
Sample(False, (3, 1, 2, 1, 4, 2), 320),
Sample(True, (3, 1, 2, 2, 1, 1), 321),
Sample(True, (3, 1, 2, 2, 1, 2), 322),
Sample(False, (3, 1, 2, 2, 2, 1), 323),
Sample(False, (3, 1, 2, 2, 2, 2), 324),
Sample(False, (3, 1, 2, 2, 3, 1), 325),
Sample(False, (3, 1, 2, 2, 3, 2), 326),
Sample(False, (3, 1, 2, 2, 4, 1), 327),
Sample(False, (3, 1, 2, 2, 4, 2), 328),
Sample(True, (3, 1, 2, 3, 1, 1), 329),
Sample(True, (3, 1, 2, 3, 1, 2), 330),
Sample(False, (3, 1, 2, 3, 2, 1), 331),
Sample(False, (3, 1, 2, 3, 2, 2), 332),
Sample(False, (3, 1, 2, 3, 3, 1), 333),
Sample(False, (3, 1, 2, 3, 3, 2), 334),
Sample(False, (3, 1, 2, 3, 4, 1), 335),
Sample(False, (3, 1, 2, 3, 4, 2), 336),
Sample(True, (3, 2, 1, 1, 1, 1), 337),
Sample(True, (3, 2, 1, 1, 1, 2), 338),
Sample(False, (3, 2, 1, 1, 2, 1), 339),
Sample(False, (3, 2, 1, 1, 2, 2), 340),
Sample(False, (3, 2, 1, 1, 3, 1), 341),
Sample(False, (3, 2, 1, 1, 3, 2), 342),
Sample(False, (3, 2, 1, 1, 4, 1), 343),
Sample(False, (3, 2, 1, 1, 4, 2), 344),
Sample(True, (3, 2, 1, 2, 1, 1), 345),
Sample(True, (3, 2, 1, 2, 1, 2), 346),
Sample(False, (3, 2, 1, 2, 2, 1), 347),
Sample(False, (3, 2, 1, 2, 2, 2), 348),
Sample(False, (3, 2, 1, 2, 3, 1), 349),
Sample(False, (3, 2, 1, 2, 3, 2), 350),
Sample(False, (3, 2, 1, 2, 4, 1), 351),
Sample(False, (3, 2, 1, 2, 4, 2), 352),
Sample(True, (3, 2, 1, 3, 1, 1), 353),
Sample(True, (3, 2, 1, 3, 1, 2), 354),
Sample(False, (3, 2, 1, 3, 2, 1), 355),
Sample(False, (3, 2, 1, 3, 2, 2), 356),
Sample(False, (3, 2, 1, 3, 3, 1), 357),
Sample(False, (3, 2, 1, 3, 3, 2), 358),
Sample(False, (3, 2, 1, 3, 4, 1), 359),
Sample(False, (3, 2, 1, 3, 4, 2), 360),
Sample(True, (3, 2, 2, 1, 1, 1), 361),
Sample(True, (3, 2, 2, 1, 1, 2), 362),
Sample(False, (3, 2, 2, 1, 2, 1), 363),
Sample(False, (3, 2, 2, 1, 2, 2), 364),
Sample(False, (3, 2, 2, 1, 3, 1), 365),
Sample(False, (3, 2, 2, 1, 3, 2), 366),
Sample(False, (3, 2, 2, 1, 4, 1), 367),
Sample(False, (3, 2, 2, 1, 4, 2), 368),
Sample(True, (3, 2, 2, 2, 1, 1), 369),
Sample(True, (3, 2, 2, 2, 1, 2), 370),
Sample(False, (3, 2, 2, 2, 2, 1), 371),
Sample(False, (3, 2, 2, 2, 2, 2), 372),
Sample(False, (3, 2, 2, 2, 3, 1), 373),
Sample(False, (3, 2, 2, 2, 3, 2), 374),
Sample(False, (3, 2, 2, 2, 4, 1), 375),
Sample(False, (3, 2, 2, 2, 4, 2), 376),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(True, (3, 2, 2, 3, 1, 2), 378),
Sample(False, (3, 2, 2, 3, 2, 1), 379),
Sample(False, (3, 2, 2, 3, 2, 2), 380),
Sample(False, (3, 2, 2, 3, 3, 1), 381),
Sample(False, (3, 2, 2, 3, 3, 2), 382),
Sample(False, (3, 2, 2, 3, 4, 1), 383),
Sample(False, (3, 2, 2, 3, 4, 2), 384),
Sample(True, (3, 3, 1, 1, 1, 1), 385),
Sample(True, (3, 3, 1, 1, 1, 2), 386),
Sample(True, (3, 3, 1, 1, 2, 1), 387),
Sample(True, (3, 3, 1, 1, 2, 2), 388),
Sample(True, (3, 3, 1, 1, 3, 1), 389),
Sample(True, (3, 3, 1, 1, 3, 2), 390),
Sample(True, (3, 3, 1, 1, 4, 1), 391),
Sample(True, (3, 3, 1, 1, 4, 2), 392),
Sample(True, (3, 3, 1, 2, 1, 1), 393),
Sample(True, (3, 3, 1, 2, 1, 2), 394),
Sample(True, (3, 3, 1, 2, 2, 1), 395),
Sample(True, (3, 3, 1, 2, 2, 2), 396),
Sample(True, (3, 3, 1, 2, 3, 1), 397),
Sample(True, (3, 3, 1, 2, 3, 2), 398),
Sample(True, (3, 3, 1, 2, 4, 1), 399),
Sample(True, (3, 3, 1, 2, 4, 2), 400),
Sample(True, (3, 3, 1, 3, 1, 1), 401),
Sample(True, (3, 3, 1, 3, 1, 2), 402),
Sample(True, (3, 3, 1, 3, 2, 1), 403),
Sample(True, (3, 3, 1, 3, 2, 2), 404),
Sample(True, (3, 3, 1, 3, 3, 1), 405),
Sample(True, (3, 3, 1, 3, 3, 2), 406),
Sample(True, (3, 3, 1, 3, 4, 1), 407),
Sample(True, (3, 3, 1, 3, 4, 2), 408),
Sample(True, (3, 3, 2, 1, 1, 1), 409),
Sample(True, (3, 3, 2, 1, 1, 2), 410),
Sample(True, (3, 3, 2, 1, 2, 1), 411),
Sample(True, (3, 3, 2, 1, 2, 2), 412),
Sample(True, (3, 3, 2, 1, 3, 1), 413),
Sample(True, (3, 3, 2, 1, 3, 2), 414),
Sample(True, (3, 3, 2, 1, 4, 1), 415),
Sample(True, (3, 3, 2, 1, 4, 2), 416),
Sample(True, (3, 3, 2, 2, 1, 1), 417),
Sample(True, (3, 3, 2, 2, 1, 2), 418),
Sample(True, (3, 3, 2, 2, 2, 1), 419),
Sample(True, (3, 3, 2, 2, 2, 2), 420),
Sample(True, (3, 3, 2, 2, 3, 1), 421),
Sample(True, (3, 3, 2, 2, 3, 2), 422),
Sample(True, (3, 3, 2, 2, 4, 1), 423),
Sample(True, (3, 3, 2, 2, 4, 2), 424),
Sample(True, (3, 3, 2, 3, 1, 1), 425),
Sample(True, (3, 3, 2, 3, 1, 2), 426),
Sample(True, (3, 3, 2, 3, 2, 1), 427),
Sample(True, (3, 3, 2, 3, 2, 2), 428),
Sample(True, (3, 3, 2, 3, 3, 1), 429),
Sample(True, (3, 3, 2, 3, 3, 2), 430),
Sample(True, (3, 3, 2, 3, 4, 1), 431),
Sample(True, (3, 3, 2, 3, 4, 2), 432))
# MONK-2 training set: 169 labelled samples from the same 432-point
# attribute space. Every sample listed here satisfies the MONK-2 target
# concept: positive <=> exactly two of the six attributes have value 1.
monk2 = (
Sample(False, (1, 1, 1, 1, 2, 2), 4),
Sample(False, (1, 1, 1, 1, 4, 1), 7),
Sample(False, (1, 1, 1, 2, 1, 1), 9),
Sample(False, (1, 1, 1, 2, 1, 2), 10),
Sample(False, (1, 1, 1, 2, 2, 1), 11),
Sample(False, (1, 1, 1, 2, 3, 1), 13),
Sample(False, (1, 1, 1, 2, 4, 1), 15),
Sample(False, (1, 1, 1, 3, 2, 1), 19),
Sample(False, (1, 1, 1, 3, 4, 1), 23),
Sample(False, (1, 1, 2, 1, 1, 1), 25),
Sample(False, (1, 1, 2, 1, 1, 2), 26),
Sample(False, (1, 1, 2, 2, 3, 1), 37),
Sample(False, (1, 1, 2, 2, 4, 1), 39),
Sample(True, (1, 1, 2, 2, 4, 2), 40),
Sample(False, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 1, 2, 3, 2, 2), 44),
Sample(False, (1, 2, 1, 1, 1, 2), 50),
Sample(False, (1, 2, 1, 2, 1, 2), 58),
Sample(True, (1, 2, 1, 2, 2, 2), 60),
Sample(False, (1, 2, 1, 2, 3, 1), 61),
Sample(True, (1, 2, 1, 2, 3, 2), 62),
Sample(False, (1, 2, 1, 2, 4, 1), 63),
Sample(False, (1, 2, 1, 3, 1, 1), 65),
Sample(False, (1, 2, 1, 3, 1, 2), 66),
Sample(True, (1, 2, 1, 3, 2, 2), 68),
Sample(False, (1, 2, 1, 3, 3, 1), 69),
Sample(True, (1, 2, 1, 3, 3, 2), 70),
Sample(False, (1, 2, 1, 3, 4, 1), 71),
Sample(True, (1, 2, 1, 3, 4, 2), 72),
Sample(False, (1, 2, 2, 1, 2, 1), 75),
Sample(False, (1, 2, 2, 1, 4, 1), 79),
Sample(True, (1, 2, 2, 2, 3, 1), 85),
Sample(True, (1, 2, 2, 2, 4, 1), 87),
Sample(False, (1, 2, 2, 3, 1, 1), 89),
Sample(True, (1, 2, 2, 3, 1, 2), 90),
Sample(True, (1, 2, 2, 3, 3, 1), 93),
Sample(False, (1, 2, 2, 3, 3, 2), 94),
Sample(True, (1, 2, 2, 3, 4, 1), 95),
Sample(False, (1, 2, 2, 3, 4, 2), 96),
Sample(False, (1, 3, 1, 1, 1, 2), 98),
Sample(False, (1, 3, 1, 1, 2, 2), 100),
Sample(False, (1, 3, 1, 1, 3, 1), 101),
Sample(False, (1, 3, 1, 1, 3, 2), 102),
Sample(False, (1, 3, 1, 2, 2, 1), 107),
Sample(True, (1, 3, 1, 2, 2, 2), 108),
Sample(True, (1, 3, 1, 2, 3, 2), 110),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(True, (1, 3, 1, 3, 2, 2), 116),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(True, (1, 3, 1, 3, 4, 2), 120),
Sample(False, (1, 3, 2, 1, 3, 1), 125),
Sample(True, (1, 3, 2, 1, 3, 2), 126),
Sample(False, (1, 3, 2, 1, 4, 1), 127),
Sample(True, (1, 3, 2, 2, 1, 2), 130),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(True, (1, 3, 2, 3, 2, 1), 139),
Sample(False, (2, 1, 1, 1, 1, 1), 145),
Sample(False, (2, 1, 1, 1, 2, 2), 148),
Sample(False, (2, 1, 1, 1, 3, 1), 149),
Sample(True, (2, 1, 1, 2, 2, 2), 156),
Sample(False, (2, 1, 1, 3, 1, 2), 162),
Sample(True, (2, 1, 1, 3, 2, 2), 164),
Sample(True, (2, 1, 1, 3, 3, 2), 166),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(False, (2, 1, 2, 1, 1, 1), 169),
Sample(True, (2, 1, 2, 1, 2, 2), 172),
Sample(False, (2, 1, 2, 1, 4, 1), 175),
Sample(True, (2, 1, 2, 2, 2, 1), 179),
Sample(False, (2, 1, 2, 2, 4, 2), 184),
Sample(False, (2, 1, 2, 3, 1, 1), 185),
Sample(True, (2, 1, 2, 3, 1, 2), 186),
Sample(False, (2, 1, 2, 3, 2, 2), 188),
Sample(False, (2, 1, 2, 3, 3, 2), 190),
Sample(False, (2, 1, 2, 3, 4, 2), 192),
Sample(False, (2, 2, 1, 1, 3, 1), 197),
Sample(True, (2, 2, 1, 1, 4, 2), 200),
Sample(False, (2, 2, 1, 2, 1, 1), 201),
Sample(True, (2, 2, 1, 2, 3, 1), 205),
Sample(True, (2, 2, 1, 3, 3, 1), 213),
Sample(False, (2, 2, 1, 3, 3, 2), 214),
Sample(True, (2, 2, 1, 3, 4, 1), 215),
Sample(False, (2, 2, 2, 1, 1, 1), 217),
Sample(False, (2, 2, 2, 1, 2, 2), 220),
Sample(False, (2, 2, 2, 1, 3, 2), 222),
Sample(True, (2, 2, 2, 1, 4, 1), 223),
Sample(False, (2, 2, 2, 1, 4, 2), 224),
Sample(True, (2, 2, 2, 2, 1, 1), 225),
Sample(False, (2, 2, 2, 2, 2, 2), 228),
Sample(False, (2, 2, 2, 2, 3, 1), 229),
Sample(True, (2, 2, 2, 3, 1, 1), 233),
Sample(False, (2, 2, 2, 3, 2, 1), 235),
Sample(False, (2, 2, 2, 3, 2, 2), 236),
Sample(False, (2, 2, 2, 3, 4, 2), 240),
Sample(False, (2, 3, 1, 1, 1, 1), 241),
Sample(False, (2, 3, 1, 1, 1, 2), 242),
Sample(True, (2, 3, 1, 1, 3, 2), 246),
Sample(False, (2, 3, 1, 2, 1, 1), 249),
Sample(True, (2, 3, 1, 2, 3, 1), 253),
Sample(False, (2, 3, 1, 2, 3, 2), 254),
Sample(False, (2, 3, 1, 2, 4, 2), 256),
Sample(True, (2, 3, 1, 3, 1, 2), 258),
Sample(True, (2, 3, 1, 3, 2, 1), 259),
Sample(True, (2, 3, 1, 3, 4, 1), 263),
Sample(True, (2, 3, 2, 1, 1, 2), 266),
Sample(True, (2, 3, 2, 1, 2, 1), 267),
Sample(True, (2, 3, 2, 1, 3, 1), 269),
Sample(False, (2, 3, 2, 1, 4, 2), 272),
Sample(True, (2, 3, 2, 2, 1, 1), 273),
Sample(False, (2, 3, 2, 2, 2, 1), 275),
Sample(False, (2, 3, 2, 2, 3, 2), 278),
Sample(False, (2, 3, 2, 3, 3, 1), 285),
Sample(False, (2, 3, 2, 3, 3, 2), 286),
Sample(False, (2, 3, 2, 3, 4, 2), 288),
Sample(False, (3, 1, 1, 1, 4, 1), 295),
Sample(False, (3, 1, 1, 2, 1, 2), 298),
Sample(True, (3, 1, 1, 2, 2, 2), 300),
Sample(True, (3, 1, 1, 2, 3, 2), 302),
Sample(False, (3, 1, 1, 2, 4, 1), 303),
Sample(True, (3, 1, 1, 2, 4, 2), 304),
Sample(False, (3, 1, 1, 3, 1, 1), 305),
Sample(False, (3, 1, 1, 3, 1, 2), 306),
Sample(True, (3, 1, 1, 3, 2, 2), 308),
Sample(True, (3, 1, 1, 3, 3, 2), 310),
Sample(False, (3, 1, 2, 1, 1, 1), 313),
Sample(True, (3, 1, 2, 1, 2, 2), 316),
Sample(False, (3, 1, 2, 1, 3, 1), 317),
Sample(True, (3, 1, 2, 1, 3, 2), 318),
Sample(False, (3, 1, 2, 1, 4, 1), 319),
Sample(True, (3, 1, 2, 1, 4, 2), 320),
Sample(True, (3, 1, 2, 2, 2, 1), 323),
Sample(True, (3, 1, 2, 3, 1, 2), 330),
Sample(True, (3, 1, 2, 3, 2, 1), 331),
Sample(False, (3, 1, 2, 3, 2, 2), 332),
Sample(False, (3, 1, 2, 3, 4, 2), 336),
Sample(False, (3, 2, 1, 1, 1, 2), 338),
Sample(True, (3, 2, 1, 1, 2, 2), 340),
Sample(False, (3, 2, 1, 1, 3, 1), 341),
Sample(True, (3, 2, 1, 1, 3, 2), 342),
Sample(True, (3, 2, 1, 2, 1, 2), 346),
Sample(True, (3, 2, 1, 2, 2, 1), 347),
Sample(False, (3, 2, 1, 3, 1, 1), 353),
Sample(True, (3, 2, 1, 3, 2, 1), 355),
Sample(True, (3, 2, 1, 3, 3, 1), 357),
Sample(False, (3, 2, 1, 3, 3, 2), 358),
Sample(False, (3, 2, 2, 1, 1, 1), 361),
Sample(False, (3, 2, 2, 1, 2, 2), 364),
Sample(True, (3, 2, 2, 1, 3, 1), 365),
Sample(False, (3, 2, 2, 1, 3, 2), 366),
Sample(True, (3, 2, 2, 2, 1, 1), 369),
Sample(False, (3, 2, 2, 2, 2, 1), 371),
Sample(False, (3, 2, 2, 2, 2, 2), 372),
Sample(False, (3, 2, 2, 2, 3, 2), 374),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(False, (3, 2, 2, 3, 3, 2), 382),
Sample(False, (3, 2, 2, 3, 4, 2), 384),
Sample(False, (3, 3, 1, 1, 1, 1), 385),
Sample(False, (3, 3, 1, 1, 2, 1), 387),
Sample(False, (3, 3, 1, 1, 3, 1), 389),
Sample(True, (3, 3, 1, 1, 3, 2), 390),
Sample(False, (3, 3, 1, 2, 3, 2), 398),
Sample(False, (3, 3, 2, 1, 1, 1), 409),
Sample(True, (3, 3, 2, 2, 1, 1), 417),
Sample(False, (3, 3, 2, 2, 2, 1), 419),
Sample(False, (3, 3, 2, 2, 3, 1), 421),
Sample(False, (3, 3, 2, 2, 3, 2), 422),
Sample(True, (3, 3, 2, 3, 1, 1), 425),
Sample(False, (3, 3, 2, 3, 2, 1), 427),
Sample(False, (3, 3, 2, 3, 4, 2), 432))
# MONK-2 test set: the complete, noise-free enumeration of the attribute
# space.  Attribute domains (a1..a6) are {1,2,3}, {1,2,3}, {1,2}, {1,2,3},
# {1,2,3,4}, {1,2} -- 3*3*2*3*4*2 = 432 combinations, listed in
# lexicographic order (a6 varying fastest) with 1-based ids, exactly as in
# the original hand-written tuple.  The MONK-2 target concept is
# "exactly two of the six attributes have value 1", so every label is
# computed instead of hard-coding 432 Sample literals.
monk2test = tuple(
    Sample(attrs.count(1) == 2, attrs, index)
    for index, attrs in enumerate(
        (
            (a1, a2, a3, a4, a5, a6)
            for a1 in (1, 2, 3)
            for a2 in (1, 2, 3)
            for a3 in (1, 2)
            for a4 in (1, 2, 3)
            for a5 in (1, 2, 3, 4)
            for a6 in (1, 2)
        ),
        start=1,
    )
)
# MONK-3 training set: 122 Sample records, each holding
# (label, attribute tuple a1..a6, 1-based id).  The ids are sparse and
# non-contiguous (2, 3, 4, 5, 7, ...), i.e. this is a hand-picked subset of
# the 432-point attribute space, unlike the exhaustive *test tuples in this
# file.  NOTE(review): labels here do not follow a single clean rule
# (e.g. ids 5 and 7 vs. their neighbors) -- presumably this carries the
# MONK-3 dataset's deliberate training-label noise; confirm against the
# original dataset before regenerating.  Keep the data literal.
monk3 = (
Sample(True, (1, 1, 1, 1, 1, 2), 2),
Sample(True, (1, 1, 1, 1, 2, 1), 3),
Sample(True, (1, 1, 1, 1, 2, 2), 4),
Sample(False, (1, 1, 1, 1, 3, 1), 5),
Sample(False, (1, 1, 1, 1, 4, 1), 7),
Sample(True, (1, 1, 1, 2, 1, 1), 9),
Sample(True, (1, 1, 1, 2, 2, 2), 12),
Sample(False, (1, 1, 1, 2, 4, 2), 16),
Sample(True, (1, 1, 2, 1, 2, 2), 28),
Sample(False, (1, 1, 2, 1, 4, 2), 32),
Sample(True, (1, 1, 2, 2, 2, 2), 36),
Sample(False, (1, 1, 2, 2, 4, 1), 39),
Sample(False, (1, 1, 2, 2, 4, 2), 40),
Sample(True, (1, 1, 2, 3, 1, 1), 41),
Sample(True, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 1, 2, 3, 3, 1), 45),
Sample(True, (1, 1, 2, 3, 3, 2), 46),
Sample(True, (1, 2, 1, 1, 3, 1), 53),
Sample(True, (1, 2, 1, 2, 2, 1), 59),
Sample(True, (1, 2, 1, 2, 2, 2), 60),
Sample(False, (1, 2, 1, 2, 3, 1), 61),
Sample(True, (1, 2, 1, 3, 1, 1), 65),
Sample(True, (1, 2, 1, 3, 1, 2), 66),
Sample(True, (1, 2, 1, 3, 2, 1), 67),
Sample(True, (1, 2, 1, 3, 2, 2), 68),
Sample(True, (1, 2, 1, 3, 3, 2), 70),
Sample(False, (1, 2, 1, 3, 4, 1), 71),
Sample(True, (1, 2, 2, 1, 3, 1), 77),
Sample(False, (1, 2, 2, 1, 4, 2), 80),
Sample(True, (1, 2, 2, 2, 1, 1), 81),
Sample(True, (1, 2, 2, 2, 2, 1), 83),
Sample(True, (1, 2, 2, 2, 2, 2), 84),
Sample(True, (1, 2, 2, 3, 1, 1), 89),
Sample(True, (1, 2, 2, 3, 2, 1), 91),
Sample(True, (1, 2, 2, 3, 2, 2), 92),
Sample(False, (1, 3, 1, 1, 2, 1), 99),
Sample(False, (1, 3, 1, 1, 4, 1), 103),
Sample(False, (1, 3, 1, 2, 3, 2), 110),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(False, (1, 3, 1, 3, 1, 1), 113),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(False, (1, 3, 2, 1, 1, 1), 121),
Sample(False, (1, 3, 2, 1, 1, 2), 122),
Sample(False, (1, 3, 2, 1, 2, 1), 123),
Sample(False, (1, 3, 2, 1, 4, 2), 128),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(False, (1, 3, 2, 3, 4, 1), 143),
Sample(True, (2, 1, 1, 1, 1, 1), 145),
Sample(True, (2, 1, 1, 1, 1, 2), 146),
Sample(False, (2, 1, 1, 1, 4, 1), 151),
Sample(False, (2, 1, 1, 1, 4, 2), 152),
Sample(True, (2, 1, 1, 2, 1, 1), 153),
Sample(True, (2, 1, 1, 2, 1, 2), 154),
Sample(True, (2, 1, 1, 3, 2, 2), 164),
Sample(True, (2, 1, 1, 3, 3, 2), 166),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(True, (2, 1, 2, 1, 2, 2), 172),
Sample(False, (2, 1, 2, 2, 4, 1), 183),
Sample(True, (2, 1, 2, 3, 1, 2), 186),
Sample(True, (2, 2, 1, 1, 3, 2), 198),
Sample(False, (2, 2, 1, 1, 4, 2), 200),
Sample(True, (2, 2, 1, 2, 1, 2), 202),
Sample(False, (2, 2, 1, 2, 2, 1), 203),
Sample(True, (2, 2, 1, 3, 1, 1), 209),
Sample(True, (2, 2, 1, 3, 2, 2), 212),
Sample(False, (2, 2, 1, 3, 3, 1), 213),
Sample(False, (2, 2, 1, 3, 3, 2), 214),
Sample(False, (2, 2, 1, 3, 4, 2), 216),
Sample(True, (2, 2, 2, 1, 2, 2), 220),
Sample(True, (2, 2, 2, 2, 1, 2), 226),
Sample(True, (2, 2, 2, 2, 3, 1), 229),
Sample(True, (2, 2, 2, 2, 3, 2), 230),
Sample(False, (2, 2, 2, 3, 4, 1), 239),
Sample(True, (2, 3, 1, 1, 3, 1), 245),
Sample(False, (2, 3, 1, 2, 1, 1), 249),
Sample(False, (2, 3, 1, 2, 2, 1), 251),
Sample(False, (2, 3, 1, 2, 2, 2), 252),
Sample(False, (2, 3, 1, 2, 3, 2), 254),
Sample(False, (2, 3, 1, 3, 3, 1), 261),
Sample(False, (2, 3, 2, 1, 1, 2), 266),
Sample(False, (2, 3, 2, 1, 2, 2), 268),
Sample(False, (2, 3, 2, 1, 4, 1), 271),
Sample(False, (2, 3, 2, 2, 3, 1), 277),
Sample(False, (2, 3, 2, 2, 4, 2), 280),
Sample(False, (2, 3, 2, 3, 1, 1), 281),
Sample(False, (2, 3, 2, 3, 2, 1), 283),
Sample(False, (2, 3, 2, 3, 4, 2), 288),
Sample(True, (3, 1, 1, 1, 1, 1), 289),
Sample(True, (3, 1, 1, 1, 2, 1), 291),
Sample(True, (3, 1, 1, 1, 3, 1), 293),
Sample(False, (3, 1, 1, 2, 4, 2), 304),
Sample(True, (3, 1, 1, 3, 1, 2), 306),
Sample(False, (3, 1, 1, 3, 4, 2), 312),
Sample(True, (3, 1, 2, 1, 2, 1), 315),
Sample(True, (3, 1, 2, 2, 3, 2), 326),
Sample(False, (3, 1, 2, 2, 4, 2), 328),
Sample(True, (3, 1, 2, 3, 1, 1), 329),
Sample(True, (3, 2, 1, 1, 2, 2), 340),
Sample(False, (3, 2, 1, 1, 4, 1), 343),
Sample(True, (3, 2, 1, 2, 3, 1), 349),
Sample(True, (3, 2, 1, 3, 1, 2), 354),
Sample(True, (3, 2, 2, 1, 2, 2), 364),
Sample(True, (3, 2, 2, 1, 3, 2), 366),
Sample(True, (3, 2, 2, 2, 1, 2), 370),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(True, (3, 2, 2, 3, 3, 2), 382),
Sample(False, (3, 2, 2, 3, 4, 1), 383),
Sample(True, (3, 3, 1, 1, 3, 2), 390),
Sample(True, (3, 3, 1, 1, 4, 1), 391),
Sample(False, (3, 3, 1, 2, 4, 2), 400),
Sample(False, (3, 3, 1, 3, 1, 1), 401),
Sample(False, (3, 3, 1, 3, 2, 1), 403),
Sample(False, (3, 3, 1, 3, 2, 2), 404),
Sample(False, (3, 3, 1, 3, 4, 1), 407),
Sample(False, (3, 3, 2, 1, 1, 1), 409),
Sample(False, (3, 3, 2, 1, 1, 2), 410),
Sample(False, (3, 3, 2, 2, 2, 2), 420),
Sample(False, (3, 3, 2, 2, 3, 2), 422),
Sample(False, (3, 3, 2, 3, 1, 1), 425),
Sample(False, (3, 3, 2, 3, 3, 2), 430),
Sample(False, (3, 3, 2, 3, 4, 2), 432))
monk3test = (
Sample(True, (1, 1, 1, 1, 1, 1), 1),
Sample(True, (1, 1, 1, 1, 1, 2), 2),
Sample(True, (1, 1, 1, 1, 2, 1), 3),
Sample(True, (1, 1, 1, 1, 2, 2), 4),
Sample(True, (1, 1, 1, 1, 3, 1), 5),
Sample(True, (1, 1, 1, 1, 3, 2), 6),
Sample(False, (1, 1, 1, 1, 4, 1), 7),
Sample(False, (1, 1, 1, 1, 4, 2), 8),
Sample(True, (1, 1, 1, 2, 1, 1), 9),
Sample(True, (1, 1, 1, 2, 1, 2), 10),
Sample(True, (1, 1, 1, 2, 2, 1), 11),
Sample(True, (1, 1, 1, 2, 2, 2), 12),
Sample(True, (1, 1, 1, 2, 3, 1), 13),
Sample(True, (1, 1, 1, 2, 3, 2), 14),
Sample(False, (1, 1, 1, 2, 4, 1), 15),
Sample(False, (1, 1, 1, 2, 4, 2), 16),
Sample(True, (1, 1, 1, 3, 1, 1), 17),
Sample(True, (1, 1, 1, 3, 1, 2), 18),
Sample(True, (1, 1, 1, 3, 2, 1), 19),
Sample(True, (1, 1, 1, 3, 2, 2), 20),
Sample(True, (1, 1, 1, 3, 3, 1), 21),
Sample(True, (1, 1, 1, 3, 3, 2), 22),
Sample(False, (1, 1, 1, 3, 4, 1), 23),
Sample(False, (1, 1, 1, 3, 4, 2), 24),
Sample(True, (1, 1, 2, 1, 1, 1), 25),
Sample(True, (1, 1, 2, 1, 1, 2), 26),
Sample(True, (1, 1, 2, 1, 2, 1), 27),
Sample(True, (1, 1, 2, 1, 2, 2), 28),
Sample(True, (1, 1, 2, 1, 3, 1), 29),
Sample(True, (1, 1, 2, 1, 3, 2), 30),
Sample(False, (1, 1, 2, 1, 4, 1), 31),
Sample(False, (1, 1, 2, 1, 4, 2), 32),
Sample(True, (1, 1, 2, 2, 1, 1), 33),
Sample(True, (1, 1, 2, 2, 1, 2), 34),
Sample(True, (1, 1, 2, 2, 2, 1), 35),
Sample(True, (1, 1, 2, 2, 2, 2), 36),
Sample(True, (1, 1, 2, 2, 3, 1), 37),
Sample(True, (1, 1, 2, 2, 3, 2), 38),
Sample(False, (1, 1, 2, 2, 4, 1), 39),
Sample(False, (1, 1, 2, 2, 4, 2), 40),
Sample(True, (1, 1, 2, 3, 1, 1), 41),
Sample(True, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 1, 2, 3, 2, 1), 43),
Sample(True, (1, 1, 2, 3, 2, 2), 44),
Sample(True, (1, 1, 2, 3, 3, 1), 45),
Sample(True, (1, 1, 2, 3, 3, 2), 46),
Sample(False, (1, 1, 2, 3, 4, 1), 47),
Sample(False, (1, 1, 2, 3, 4, 2), 48),
Sample(True, (1, 2, 1, 1, 1, 1), 49),
Sample(True, (1, 2, 1, 1, 1, 2), 50),
Sample(True, (1, 2, 1, 1, 2, 1), 51),
Sample(True, (1, 2, 1, 1, 2, 2), 52),
Sample(True, (1, 2, 1, 1, 3, 1), 53),
Sample(True, (1, 2, 1, 1, 3, 2), 54),
Sample(False, (1, 2, 1, 1, 4, 1), 55),
Sample(False, (1, 2, 1, 1, 4, 2), 56),
Sample(True, (1, 2, 1, 2, 1, 1), 57),
Sample(True, (1, 2, 1, 2, 1, 2), 58),
Sample(True, (1, 2, 1, 2, 2, 1), 59),
Sample(True, (1, 2, 1, 2, 2, 2), 60),
Sample(True, (1, 2, 1, 2, 3, 1), 61),
Sample(True, (1, 2, 1, 2, 3, 2), 62),
Sample(False, (1, 2, 1, 2, 4, 1), 63),
Sample(False, (1, 2, 1, 2, 4, 2), 64),
Sample(True, (1, 2, 1, 3, 1, 1), 65),
Sample(True, (1, 2, 1, 3, 1, 2), 66),
Sample(True, (1, 2, 1, 3, 2, 1), 67),
Sample(True, (1, 2, 1, 3, 2, 2), 68),
Sample(True, (1, 2, 1, 3, 3, 1), 69),
Sample(True, (1, 2, 1, 3, 3, 2), 70),
Sample(False, (1, 2, 1, 3, 4, 1), 71),
Sample(False, (1, 2, 1, 3, 4, 2), 72),
Sample(True, (1, 2, 2, 1, 1, 1), 73),
Sample(True, (1, 2, 2, 1, 1, 2), 74),
Sample(True, (1, 2, 2, 1, 2, 1), 75),
Sample(True, (1, 2, 2, 1, 2, 2), 76),
Sample(True, (1, 2, 2, 1, 3, 1), 77),
Sample(True, (1, 2, 2, 1, 3, 2), 78),
Sample(False, (1, 2, 2, 1, 4, 1), 79),
Sample(False, (1, 2, 2, 1, 4, 2), 80),
Sample(True, (1, 2, 2, 2, 1, 1), 81),
Sample(True, (1, 2, 2, 2, 1, 2), 82),
Sample(True, (1, 2, 2, 2, 2, 1), 83),
Sample(True, (1, 2, 2, 2, 2, 2), 84),
Sample(True, (1, 2, 2, 2, 3, 1), 85),
Sample(True, (1, 2, 2, 2, 3, 2), 86),
Sample(False, (1, 2, 2, 2, 4, 1), 87),
Sample(False, (1, 2, 2, 2, 4, 2), 88),
Sample(True, (1, 2, 2, 3, 1, 1), 89),
Sample(True, (1, 2, 2, 3, 1, 2), 90),
Sample(True, (1, 2, 2, 3, 2, 1), 91),
Sample(True, (1, 2, 2, 3, 2, 2), 92),
Sample(True, (1, 2, 2, 3, 3, 1), 93),
Sample(True, (1, 2, 2, 3, 3, 2), 94),
Sample(False, (1, 2, 2, 3, 4, 1), 95),
Sample(False, (1, 2, 2, 3, 4, 2), 96),
Sample(False, (1, 3, 1, 1, 1, 1), 97),
Sample(False, (1, 3, 1, 1, 1, 2), 98),
Sample(False, (1, 3, 1, 1, 2, 1), 99),
Sample(False, (1, 3, 1, 1, 2, 2), 100),
Sample(True, (1, 3, 1, 1, 3, 1), 101),
Sample(True, (1, 3, 1, 1, 3, 2), 102),
Sample(False, (1, 3, 1, 1, 4, 1), 103),
Sample(False, (1, 3, 1, 1, 4, 2), 104),
Sample(False, (1, 3, 1, 2, 1, 1), 105),
Sample(False, (1, 3, 1, 2, 1, 2), 106),
Sample(False, (1, 3, 1, 2, 2, 1), 107),
Sample(False, (1, 3, 1, 2, 2, 2), 108),
Sample(False, (1, 3, 1, 2, 3, 1), 109),
Sample(False, (1, 3, 1, 2, 3, 2), 110),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(False, (1, 3, 1, 2, 4, 2), 112),
Sample(False, (1, 3, 1, 3, 1, 1), 113),
Sample(False, (1, 3, 1, 3, 1, 2), 114),
Sample(False, (1, 3, 1, 3, 2, 1), 115),
Sample(False, (1, 3, 1, 3, 2, 2), 116),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(False, (1, 3, 1, 3, 3, 2), 118),
Sample(False, (1, 3, 1, 3, 4, 1), 119),
Sample(False, (1, 3, 1, 3, 4, 2), 120),
Sample(False, (1, 3, 2, 1, 1, 1), 121),
Sample(False, (1, 3, 2, 1, 1, 2), 122),
Sample(False, (1, 3, 2, 1, 2, 1), 123),
Sample(False, (1, 3, 2, 1, 2, 2), 124),
Sample(True, (1, 3, 2, 1, 3, 1), 125),
Sample(True, (1, 3, 2, 1, 3, 2), 126),
Sample(False, (1, 3, 2, 1, 4, 1), 127),
Sample(False, (1, 3, 2, 1, 4, 2), 128),
Sample(False, (1, 3, 2, 2, 1, 1), 129),
Sample(False, (1, 3, 2, 2, 1, 2), 130),
Sample(False, (1, 3, 2, 2, 2, 1), 131),
Sample(False, (1, 3, 2, 2, 2, 2), 132),
Sample(False, (1, 3, 2, 2, 3, 1), 133),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 1), 135),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(False, (1, 3, 2, 3, 1, 1), 137),
Sample(False, (1, 3, 2, 3, 1, 2), 138),
Sample(False, (1, 3, 2, 3, 2, 1), 139),
Sample(False, (1, 3, 2, 3, 2, 2), 140),
Sample(False, (1, 3, 2, 3, 3, 1), 141),
Sample(False, (1, 3, 2, 3, 3, 2), 142),
Sample(False, (1, 3, 2, 3, 4, 1), 143),
Sample(False, (1, 3, 2, 3, 4, 2), 144),
Sample(True, (2, 1, 1, 1, 1, 1), 145),
Sample(True, (2, 1, 1, 1, 1, 2), 146),
Sample(True, (2, 1, 1, 1, 2, 1), 147),
Sample(True, (2, 1, 1, 1, 2, 2), 148),
Sample(True, (2, 1, 1, 1, 3, 1), 149),
Sample(True, (2, 1, 1, 1, 3, 2), 150),
Sample(False, (2, 1, 1, 1, 4, 1), 151),
Sample(False, (2, 1, 1, 1, 4, 2), 152),
Sample(True, (2, 1, 1, 2, 1, 1), 153),
Sample(True, (2, 1, 1, 2, 1, 2), 154),
Sample(True, (2, 1, 1, 2, 2, 1), 155),
Sample(True, (2, 1, 1, 2, 2, 2), 156),
Sample(True, (2, 1, 1, 2, 3, 1), 157),
Sample(True, (2, 1, 1, 2, 3, 2), 158),
Sample(False, (2, 1, 1, 2, 4, 1), 159),
Sample(False, (2, 1, 1, 2, 4, 2), 160),
Sample(True, (2, 1, 1, 3, 1, 1), 161),
Sample(True, (2, 1, 1, 3, 1, 2), 162),
Sample(True, (2, 1, 1, 3, 2, 1), 163),
Sample(True, (2, 1, 1, 3, 2, 2), 164),
Sample(True, (2, 1, 1, 3, 3, 1), 165),
Sample(True, (2, 1, 1, 3, 3, 2), 166),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(False, (2, 1, 1, 3, 4, 2), 168),
Sample(True, (2, 1, 2, 1, 1, 1), 169),
Sample(True, (2, 1, 2, 1, 1, 2), 170),
Sample(True, (2, 1, 2, 1, 2, 1), 171),
Sample(True, (2, 1, 2, 1, 2, 2), 172),
Sample(True, (2, 1, 2, 1, 3, 1), 173),
Sample(True, (2, 1, 2, 1, 3, 2), 174),
Sample(False, (2, 1, 2, 1, 4, 1), 175),
Sample(False, (2, 1, 2, 1, 4, 2), 176),
Sample(True, (2, 1, 2, 2, 1, 1), 177),
Sample(True, (2, 1, 2, 2, 1, 2), 178),
Sample(True, (2, 1, 2, 2, 2, 1), 179),
Sample(True, (2, 1, 2, 2, 2, 2), 180),
Sample(True, (2, 1, 2, 2, 3, 1), 181),
Sample(True, (2, 1, 2, 2, 3, 2), 182),
Sample(False, (2, 1, 2, 2, 4, 1), 183),
Sample(False, (2, 1, 2, 2, 4, 2), 184),
Sample(True, (2, 1, 2, 3, 1, 1), 185),
Sample(True, (2, 1, 2, 3, 1, 2), 186),
Sample(True, (2, 1, 2, 3, 2, 1), 187),
Sample(True, (2, 1, 2, 3, 2, 2), 188),
Sample(True, (2, 1, 2, 3, 3, 1), 189),
Sample(True, (2, 1, 2, 3, 3, 2), 190),
Sample(False, (2, 1, 2, 3, 4, 1), 191),
Sample(False, (2, 1, 2, 3, 4, 2), 192),
Sample(True, (2, 2, 1, 1, 1, 1), 193),
Sample(True, (2, 2, 1, 1, 1, 2), 194),
Sample(True, (2, 2, 1, 1, 2, 1), 195),
Sample(True, (2, 2, 1, 1, 2, 2), 196),
Sample(True, (2, 2, 1, 1, 3, 1), 197),
Sample(True, (2, 2, 1, 1, 3, 2), 198),
Sample(False, (2, 2, 1, 1, 4, 1), 199),
Sample(False, (2, 2, 1, 1, 4, 2), 200),
Sample(True, (2, 2, 1, 2, 1, 1), 201),
Sample(True, (2, 2, 1, 2, 1, 2), 202),
Sample(True, (2, 2, 1, 2, 2, 1), 203),
Sample(True, (2, 2, 1, 2, 2, 2), 204),
Sample(True, (2, 2, 1, 2, 3, 1), 205),
Sample(True, (2, 2, 1, 2, 3, 2), 206),
Sample(False, (2, 2, 1, 2, 4, 1), 207),
Sample(False, (2, 2, 1, 2, 4, 2), 208),
Sample(True, (2, 2, 1, 3, 1, 1), 209),
Sample(True, (2, 2, 1, 3, 1, 2), 210),
Sample(True, (2, 2, 1, 3, 2, 1), 211),
Sample(True, (2, 2, 1, 3, 2, 2), 212),
Sample(True, (2, 2, 1, 3, 3, 1), 213),
Sample(True, (2, 2, 1, 3, 3, 2), 214),
Sample(False, (2, 2, 1, 3, 4, 1), 215),
Sample(False, (2, 2, 1, 3, 4, 2), 216),
Sample(True, (2, 2, 2, 1, 1, 1), 217),
Sample(True, (2, 2, 2, 1, 1, 2), 218),
Sample(True, (2, 2, 2, 1, 2, 1), 219),
Sample(True, (2, 2, 2, 1, 2, 2), 220),
Sample(True, (2, 2, 2, 1, 3, 1), 221),
Sample(True, (2, 2, 2, 1, 3, 2), 222),
Sample(False, (2, 2, 2, 1, 4, 1), 223),
Sample(False, (2, 2, 2, 1, 4, 2), 224),
Sample(True, (2, 2, 2, 2, 1, 1), 225),
Sample(True, (2, 2, 2, 2, 1, 2), 226),
Sample(True, (2, 2, 2, 2, 2, 1), 227),
Sample(True, (2, 2, 2, 2, 2, 2), 228),
Sample(True, (2, 2, 2, 2, 3, 1), 229),
Sample(True, (2, 2, 2, 2, 3, 2), 230),
Sample(False, (2, 2, 2, 2, 4, 1), 231),
Sample(False, (2, 2, 2, 2, 4, 2), 232),
Sample(True, (2, 2, 2, 3, 1, 1), 233),
Sample(True, (2, 2, 2, 3, 1, 2), 234),
Sample(True, (2, 2, 2, 3, 2, 1), 235),
Sample(True, (2, 2, 2, 3, 2, 2), 236),
Sample(True, (2, 2, 2, 3, 3, 1), 237),
Sample(True, (2, 2, 2, 3, 3, 2), 238),
Sample(False, (2, 2, 2, 3, 4, 1), 239),
Sample(False, (2, 2, 2, 3, 4, 2), 240),
Sample(False, (2, 3, 1, 1, 1, 1), 241),
Sample(False, (2, 3, 1, 1, 1, 2), 242),
Sample(False, (2, 3, 1, 1, 2, 1), 243),
Sample(False, (2, 3, 1, 1, 2, 2), 244),
Sample(True, (2, 3, 1, 1, 3, 1), 245),
Sample(True, (2, 3, 1, 1, 3, 2), 246),
Sample(False, (2, 3, 1, 1, 4, 1), 247),
Sample(False, (2, 3, 1, 1, 4, 2), 248),
Sample(False, (2, 3, 1, 2, 1, 1), 249),
Sample(False, (2, 3, 1, 2, 1, 2), 250),
Sample(False, (2, 3, 1, 2, 2, 1), 251),
Sample(False, (2, 3, 1, 2, 2, 2), 252),
Sample(False, (2, 3, 1, 2, 3, 1), 253),
Sample(False, (2, 3, 1, 2, 3, 2), 254),
Sample(False, (2, 3, 1, 2, 4, 1), 255),
Sample(False, (2, 3, 1, 2, 4, 2), 256),
Sample(False, (2, 3, 1, 3, 1, 1), 257),
Sample(False, (2, 3, 1, 3, 1, 2), 258),
Sample(False, (2, 3, 1, 3, 2, 1), 259),
Sample(False, (2, 3, 1, 3, 2, 2), 260),
Sample(False, (2, 3, 1, 3, 3, 1), 261),
Sample(False, (2, 3, 1, 3, 3, 2), 262),
Sample(False, (2, 3, 1, 3, 4, 1), 263),
Sample(False, (2, 3, 1, 3, 4, 2), 264),
Sample(False, (2, 3, 2, 1, 1, 1), 265),
Sample(False, (2, 3, 2, 1, 1, 2), 266),
Sample(False, (2, 3, 2, 1, 2, 1), 267),
Sample(False, (2, 3, 2, 1, 2, 2), 268),
Sample(True, (2, 3, 2, 1, 3, 1), 269),
Sample(True, (2, 3, 2, 1, 3, 2), 270),
Sample(False, (2, 3, 2, 1, 4, 1), 271),
Sample(False, (2, 3, 2, 1, 4, 2), 272),
Sample(False, (2, 3, 2, 2, 1, 1), 273),
Sample(False, (2, 3, 2, 2, 1, 2), 274),
Sample(False, (2, 3, 2, 2, 2, 1), 275),
Sample(False, (2, 3, 2, 2, 2, 2), 276),
Sample(False, (2, 3, 2, 2, 3, 1), 277),
Sample(False, (2, 3, 2, 2, 3, 2), 278),
Sample(False, (2, 3, 2, 2, 4, 1), 279),
Sample(False, (2, 3, 2, 2, 4, 2), 280),
Sample(False, (2, 3, 2, 3, 1, 1), 281),
Sample(False, (2, 3, 2, 3, 1, 2), 282),
Sample(False, (2, 3, 2, 3, 2, 1), 283),
Sample(False, (2, 3, 2, 3, 2, 2), 284),
Sample(False, (2, 3, 2, 3, 3, 1), 285),
Sample(False, (2, 3, 2, 3, 3, 2), 286),
Sample(False, (2, 3, 2, 3, 4, 1), 287),
Sample(False, (2, 3, 2, 3, 4, 2), 288),
Sample(True, (3, 1, 1, 1, 1, 1), 289),
Sample(True, (3, 1, 1, 1, 1, 2), 290),
Sample(True, (3, 1, 1, 1, 2, 1), 291),
Sample(True, (3, 1, 1, 1, 2, 2), 292),
Sample(True, (3, 1, 1, 1, 3, 1), 293),
Sample(True, (3, 1, 1, 1, 3, 2), 294),
Sample(False, (3, 1, 1, 1, 4, 1), 295),
Sample(False, (3, 1, 1, 1, 4, 2), 296),
Sample(True, (3, 1, 1, 2, 1, 1), 297),
Sample(True, (3, 1, 1, 2, 1, 2), 298),
Sample(True, (3, 1, 1, 2, 2, 1), 299),
Sample(True, (3, 1, 1, 2, 2, 2), 300),
Sample(True, (3, 1, 1, 2, 3, 1), 301),
Sample(True, (3, 1, 1, 2, 3, 2), 302),
Sample(False, (3, 1, 1, 2, 4, 1), 303),
Sample(False, (3, 1, 1, 2, 4, 2), 304),
Sample(True, (3, 1, 1, 3, 1, 1), 305),
Sample(True, (3, 1, 1, 3, 1, 2), 306),
Sample(True, (3, 1, 1, 3, 2, 1), 307),
Sample(True, (3, 1, 1, 3, 2, 2), 308),
Sample(True, (3, 1, 1, 3, 3, 1), 309),
Sample(True, (3, 1, 1, 3, 3, 2), 310),
Sample(False, (3, 1, 1, 3, 4, 1), 311),
Sample(False, (3, 1, 1, 3, 4, 2), 312),
Sample(True, (3, 1, 2, 1, 1, 1), 313),
Sample(True, (3, 1, 2, 1, 1, 2), 314),
Sample(True, (3, 1, 2, 1, 2, 1), 315),
Sample(True, (3, 1, 2, 1, 2, 2), 316),
Sample(True, (3, 1, 2, 1, 3, 1), 317),
Sample(True, (3, 1, 2, 1, 3, 2), 318),
Sample(False, (3, 1, 2, 1, 4, 1), 319),
Sample(False, (3, 1, 2, 1, 4, 2), 320),
Sample(True, (3, 1, 2, 2, 1, 1), 321),
Sample(True, (3, 1, 2, 2, 1, 2), 322),
Sample(True, (3, 1, 2, 2, 2, 1), 323),
Sample(True, (3, 1, 2, 2, 2, 2), 324),
Sample(True, (3, 1, 2, 2, 3, 1), 325),
Sample(True, (3, 1, 2, 2, 3, 2), 326),
Sample(False, (3, 1, 2, 2, 4, 1), 327),
Sample(False, (3, 1, 2, 2, 4, 2), 328),
Sample(True, (3, 1, 2, 3, 1, 1), 329),
Sample(True, (3, 1, 2, 3, 1, 2), 330),
Sample(True, (3, 1, 2, 3, 2, 1), 331),
Sample(True, (3, 1, 2, 3, 2, 2), 332),
Sample(True, (3, 1, 2, 3, 3, 1), 333),
Sample(True, (3, 1, 2, 3, 3, 2), 334),
Sample(False, (3, 1, 2, 3, 4, 1), 335),
Sample(False, (3, 1, 2, 3, 4, 2), 336),
Sample(True, (3, 2, 1, 1, 1, 1), 337),
Sample(True, (3, 2, 1, 1, 1, 2), 338),
Sample(True, (3, 2, 1, 1, 2, 1), 339),
Sample(True, (3, 2, 1, 1, 2, 2), 340),
Sample(True, (3, 2, 1, 1, 3, 1), 341),
Sample(True, (3, 2, 1, 1, 3, 2), 342),
Sample(False, (3, 2, 1, 1, 4, 1), 343),
Sample(False, (3, 2, 1, 1, 4, 2), 344),
Sample(True, (3, 2, 1, 2, 1, 1), 345),
Sample(True, (3, 2, 1, 2, 1, 2), 346),
Sample(True, (3, 2, 1, 2, 2, 1), 347),
Sample(True, (3, 2, 1, 2, 2, 2), 348),
Sample(True, (3, 2, 1, 2, 3, 1), 349),
Sample(True, (3, 2, 1, 2, 3, 2), 350),
Sample(False, (3, 2, 1, 2, 4, 1), 351),
Sample(False, (3, 2, 1, 2, 4, 2), 352),
Sample(True, (3, 2, 1, 3, 1, 1), 353),
Sample(True, (3, 2, 1, 3, 1, 2), 354),
Sample(True, (3, 2, 1, 3, 2, 1), 355),
Sample(True, (3, 2, 1, 3, 2, 2), 356),
Sample(True, (3, 2, 1, 3, 3, 1), 357),
Sample(True, (3, 2, 1, 3, 3, 2), 358),
Sample(False, (3, 2, 1, 3, 4, 1), 359),
Sample(False, (3, 2, 1, 3, 4, 2), 360),
Sample(True, (3, 2, 2, 1, 1, 1), 361),
Sample(True, (3, 2, 2, 1, 1, 2), 362),
Sample(True, (3, 2, 2, 1, 2, 1), 363),
Sample(True, (3, 2, 2, 1, 2, 2), 364),
Sample(True, (3, 2, 2, 1, 3, 1), 365),
Sample(True, (3, 2, 2, 1, 3, 2), 366),
Sample(False, (3, 2, 2, 1, 4, 1), 367),
Sample(False, (3, 2, 2, 1, 4, 2), 368),
Sample(True, (3, 2, 2, 2, 1, 1), 369),
Sample(True, (3, 2, 2, 2, 1, 2), 370),
Sample(True, (3, 2, 2, 2, 2, 1), 371),
Sample(True, (3, 2, 2, 2, 2, 2), 372),
Sample(True, (3, 2, 2, 2, 3, 1), 373),
Sample(True, (3, 2, 2, 2, 3, 2), 374),
Sample(False, (3, 2, 2, 2, 4, 1), 375),
Sample(False, (3, 2, 2, 2, 4, 2), 376),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(True, (3, 2, 2, 3, 1, 2), 378),
Sample(True, (3, 2, 2, 3, 2, 1), 379),
Sample(True, (3, 2, 2, 3, 2, 2), 380),
Sample(True, (3, 2, 2, 3, 3, 1), 381),
Sample(True, (3, 2, 2, 3, 3, 2), 382),
Sample(False, (3, 2, 2, 3, 4, 1), 383),
Sample(False, (3, 2, 2, 3, 4, 2), 384),
Sample(False, (3, 3, 1, 1, 1, 1), 385),
Sample(False, (3, 3, 1, 1, 1, 2), 386),
Sample(False, (3, 3, 1, 1, 2, 1), 387),
Sample(False, (3, 3, 1, 1, 2, 2), 388),
Sample(True, (3, 3, 1, 1, 3, 1), 389),
Sample(True, (3, 3, 1, 1, 3, 2), 390),
Sample(False, (3, 3, 1, 1, 4, 1), 391),
Sample(False, (3, 3, 1, 1, 4, 2), 392),
Sample(False, (3, 3, 1, 2, 1, 1), 393),
Sample(False, (3, 3, 1, 2, 1, 2), 394),
Sample(False, (3, 3, 1, 2, 2, 1), 395),
Sample(False, (3, 3, 1, 2, 2, 2), 396),
Sample(False, (3, 3, 1, 2, 3, 1), 397),
Sample(False, (3, 3, 1, 2, 3, 2), 398),
Sample(False, (3, 3, 1, 2, 4, 1), 399),
Sample(False, (3, 3, 1, 2, 4, 2), 400),
Sample(False, (3, 3, 1, 3, 1, 1), 401),
Sample(False, (3, 3, 1, 3, 1, 2), 402),
Sample(False, (3, 3, 1, 3, 2, 1), 403),
Sample(False, (3, 3, 1, 3, 2, 2), 404),
Sample(False, (3, 3, 1, 3, 3, 1), 405),
Sample(False, (3, 3, 1, 3, 3, 2), 406),
Sample(False, (3, 3, 1, 3, 4, 1), 407),
Sample(False, (3, 3, 1, 3, 4, 2), 408),
Sample(False, (3, 3, 2, 1, 1, 1), 409),
Sample(False, (3, 3, 2, 1, 1, 2), 410),
Sample(False, (3, 3, 2, 1, 2, 1), 411),
Sample(False, (3, 3, 2, 1, 2, 2), 412),
Sample(True, (3, 3, 2, 1, 3, 1), 413),
Sample(True, (3, 3, 2, 1, 3, 2), 414),
Sample(False, (3, 3, 2, 1, 4, 1), 415),
Sample(False, (3, 3, 2, 1, 4, 2), 416),
Sample(False, (3, 3, 2, 2, 1, 1), 417),
Sample(False, (3, 3, 2, 2, 1, 2), 418),
Sample(False, (3, 3, 2, 2, 2, 1), 419),
Sample(False, (3, 3, 2, 2, 2, 2), 420),
Sample(False, (3, 3, 2, 2, 3, 1), 421),
Sample(False, (3, 3, 2, 2, 3, 2), 422),
Sample(False, (3, 3, 2, 2, 4, 1), 423),
Sample(False, (3, 3, 2, 2, 4, 2), 424),
Sample(False, (3, 3, 2, 3, 1, 1), 425),
Sample(False, (3, 3, 2, 3, 1, 2), 426),
Sample(False, (3, 3, 2, 3, 2, 1), 427),
Sample(False, (3, 3, 2, 3, 2, 2), 428),
Sample(False, (3, 3, 2, 3, 3, 1), 429),
Sample(False, (3, 3, 2, 3, 3, 2), 430),
Sample(False, (3, 3, 2, 3, 4, 1), 431),
Sample(False, (3, 3, 2, 3, 4, 2), 432))
| 40.239292
| 54
| 0.474191
| 15,482
| 70,459
| 2.157279
| 0.030229
| 0.067068
| 0.03036
| 0.058774
| 0.988353
| 0.988353
| 0.984311
| 0.968532
| 0.948801
| 0.947843
| 0
| 0.277326
| 0.232859
| 70,459
| 1,750
| 55
| 40.262286
| 0.340579
| 0.000752
| 0
| 0.853111
| 0
| 0
| 0.000908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001728
| false
| 0
| 0
| 0.000576
| 0.003456
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.