hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce829c91eaf75b1ed2171380cb8913c06dcb1070
| 1,849
|
py
|
Python
|
candejar/cid/__init__.py
|
Ricyteach/candejar
|
a83a83a377f7d757568e373f2ff6dcb4e1661e70
|
[
"MIT"
] | null | null | null |
candejar/cid/__init__.py
|
Ricyteach/candejar
|
a83a83a377f7d757568e373f2ff6dcb4e1661e70
|
[
"MIT"
] | null | null | null |
candejar/cid/__init__.py
|
Ricyteach/candejar
|
a83a83a377f7d757568e373f2ff6dcb4e1661e70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Sub package for reading/writing (parsing/formatting) lines of .cid files."""
from typing import TypeVar, Type
from .cidlineclasses import A1, A2, C1, C2, C3, C4, C5, E1, Stop, D1, D2Isotropic, D2Orthotropic, D2Duncan, D3Duncan, D4Duncan, D2Over, D2Hardin, D2HardinTRIA, D2Interface, D2Composite, D2MohrCoulomb, B1Alum, B2AlumA, B2AlumDWSD, B2AlumDLRFD, B3AlumADLRFD, B1Steel, B2SteelA, B2SteelDWSD, B2SteelDLRFD, B2bSteel, B2cSteel, B2dSteel, B3SteelADLRFD, B1Plastic, B2Plastic, B3PlasticAGeneral, B3PlasticASmooth, B3PlasticAProfile, B3bPlasticAProfile, B3PlasticDWSD, B3PlasticDLRFD, B4Plastic, B1Concrete, B2Concrete, B3Concrete, B4ConcreteCase1_2, B4ConcreteCase3, B4bConcreteCase3, B4ConcreteCase4, B4ConcreteCase5, B5Concrete, B1Basic, B2Basic
from .cidline import CidLine
# Alias for "a CidLine subclass itself" (the class object, not an instance).
CidLineType = Type[CidLine]
# Constrained TypeVar: values annotated CidSubLine must be exactly one of
# these sequence-able line classes (see SEQ_LINE_TYPES below).
CidSubLine = TypeVar("CidSubLine", A2, C3, C4, C5, D1, E1)
# below definitions for read/write of cid objects
SEQ_LINE_TYPES = (A2, C3, C4, C5, D1, E1) # note: needs to be ordered
TOP_LEVEL_TYPES = set(SEQ_LINE_TYPES) | {A1, C1, C2, Stop} # marks end of B1 etc. or D2 etc. sub lines
CIDL_FORMAT_TYPES = set(SEQ_LINE_TYPES) - {A2} # line types that can use the cidL format
# Public API: every line class re-exported by this package; the string is
# split on ", " so it must stay in sync with the import list above.
__all__ = "A1, A2, C1, C2, C3, C4, C5, E1, Stop, D1, D2Isotropic, D2Orthotropic, D2Duncan, D3Duncan, D4Duncan, D2Over, D2Hardin, D2HardinTRIA, D2Interface, D2Composite, D2MohrCoulomb, B1Alum, B2AlumA, B2AlumDWSD, B2AlumDLRFD, B3AlumADLRFD, B1Steel, B2SteelA, B2SteelDWSD, B2SteelDLRFD, B2bSteel, B2cSteel, B2dSteel, B3SteelADLRFD, B1Plastic, B2Plastic, B3PlasticAGeneral, B3PlasticASmooth, B3PlasticAProfile, B3bPlasticAProfile, B3PlasticDWSD, B3PlasticDLRFD, B4Plastic, B1Concrete, B2Concrete, B3Concrete, B4ConcreteCase1_2, B4ConcreteCase3, B4bConcreteCase3, B4ConcreteCase4, B4ConcreteCase5, B5Concrete, B1Basic, B2Basic".split(", ")
| 97.315789
| 640
| 0.78583
| 208
| 1,849
| 6.908654
| 0.514423
| 0.011134
| 0.016701
| 0.011134
| 0.74739
| 0.719555
| 0.702853
| 0.702853
| 0.702853
| 0.702853
| 0
| 0.08369
| 0.114657
| 1,849
| 18
| 641
| 102.722222
| 0.794136
| 0.134667
| 0
| 0
| 0
| 0.111111
| 0.392453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ceae5a7d16efbed0c1b9385c2f0c43199af1d60c
| 24,720
|
py
|
Python
|
tests/test_testcase.py
|
WalkerWhite/introcs-python
|
c4bcc697e49a371a4254ae9cb883cf06aa6f9e4f
|
[
"MIT"
] | 1
|
2021-12-26T03:15:59.000Z
|
2021-12-26T03:15:59.000Z
|
tests/test_testcase.py
|
WalkerWhite/introcs-python
|
c4bcc697e49a371a4254ae9cb883cf06aa6f9e4f
|
[
"MIT"
] | null | null | null |
tests/test_testcase.py
|
WalkerWhite/introcs-python
|
c4bcc697e49a371a4254ae9cb883cf06aa6f9e4f
|
[
"MIT"
] | 2
|
2021-01-20T01:38:08.000Z
|
2021-07-17T02:15:37.000Z
|
"""
Unit test for the test case package
This tests the custom unittest package that we provide in introcs to make everything
easier.
:author: Walker M. White (wmw2)
:version: July 13, 2018
"""
import unittest
import numpy
# This is necessary for command interception.
# Save references to the real builtins before any test runs: setUp swaps
# print/quit for recording stubs, and tearDown restores these originals.
display = print
thequit = quit
class UnitTestTest(unittest.TestCase):
    """
    Unit test for the introcs test case package (introcs.testcase).

    The tests intercept the builtin ``print`` and ``quit`` so that the
    package's assert helpers can be exercised without killing the runner.
    (The original docstring said "url tools package", a copy-paste error.)
    """
    def setUp(self):
        """
        Initializes a unit test.

        Patches the builtin ``quit`` and ``print`` with the recording stubs
        below *before* importing the module under test, so that its assert
        helpers are captured instead of terminating the process.
        """
        locs = locals()
        globs = globals()
        # Order matters: the builtins must be swapped before the import.
        globs['__builtins__']['quit'] = self.doquit
        globs['__builtins__']['print'] = self.doprint
        # NOTE(review): __import__ of a dotted name normally returns the
        # top-level package; presumably introcs re-exports the testcase
        # helpers, since the tests call them on self._test — confirm.
        self._test = __import__('introcs.testcase',globs,locs)
        self.clear()
def tearDown(self):
"""
Completes a unit test
"""
globs = globals()
globs['__builtins__']['quit'] = display
globs['__builtins__']['print'] = thequit
self._test = None
    def doquit(self):
        """
        Performs a faux application quit.

        Installed over the builtin ``quit`` by setUp; simply records that
        the code under test attempted to exit.
        """
        self._quit = True
    def isquit(self):
        """
        Returns True if the assert quit the program (i.e. the intercepted
        ``quit`` was called since the last clear()).
        """
        return self._quit
def doprint(self, *objects, sep=' ', end='\n', file=None, flush=False):
"""
Captures a print statement to an internal attribute for recording.
"""
from io import StringIO
outs = StringIO()
display(*objects,sep=sep,end=end,file=outs,flush=flush)
self._outp.append(outs.getvalue())
outs.close()
    def getprint(self):
        """
        Returns the output recorded from intercepted print statements.

        Each list element is the full rendered text of one print call.
        (Fixed docstring typo: "form" -> "from".)
        """
        return self._outp
    def clear(self):
        """
        Resets the recording of any assert messages.

        Clears both the quit flag and the captured print output.
        """
        self._quit = False
        self._outp = []
    def test03_quit(self):
        """
        Tests the quit command and interception.

        NOTE(review): the expected 'Line 85' value encodes the source line
        of the failing call as reported by quit_with_error; moving code in
        this file would invalidate it — confirm before reformatting.
        """
        def invoke(): # Since this function unwraps
            self._test.quit_with_error('Hello world!')
        invoke()
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'Hello world!')
        self.assertEqual(self._outp[1][:7],'Line 85')
        self.clear()
    def test04_asserts_basic(self):
        """
        Tests the basic unit test asserts (equals / not_equals / true / false).

        Each failing call is followed by checks of the captured error text
        and the reported source line ('Line NN' values are position-sensitive).
        """
        self._test.assert_equals(1,1)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_equals(1,2) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_equals: expected 1 but instead got 2')
        self.assertEqual(self._outp[1][:7],'Line 99')
        self.clear()
        self._test.assert_not_equals(1,2)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_not_equals(1,1) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_not_equals: expected something different from 1')
        self.assertEqual(self._outp[1][:8],'Line 109')
        self.clear()
        self._test.assert_true(1==1)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_true(0) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_true: 0 evaluates to False')
        self.assertEqual(self._outp[1][:8],'Line 119')
        self.clear()
        self._test.assert_false(1==2)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_false(1) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_false: 1 evaluates to True')
        self.assertEqual(self._outp[1][:8],'Line 129')
        self.clear()
    def test05_asserts_floats(self):
        """
        Tests the float unit test asserts (scalar and nested-list variants).

        Covers: close-enough equality, non-numeric argument detection,
        size-mismatch detection, and nested list comparison.  'Line NN'
        expectations are position-sensitive — do not move these calls.
        """
        self._test.assert_floats_equal(1.0000001,1.0000002)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_floats_equal('a',1) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_floats_equal: first argument 'a' is not a number")
        self.assertEqual(self._outp[1][:8],'Line 143')
        self.clear()
        self._test.assert_floats_equal(1,'a') # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_floats_equal: second argument 'a' is not a number")
        self.assertEqual(self._outp[1][:8],'Line 149')
        self.clear()
        self._test.assert_floats_equal(1.1,1.2) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_floats_equal: expected 1.1 but instead got 1.2')
        self.assertEqual(self._outp[1][:8],'Line 155')
        self.clear()
        self._test.assert_floats_not_equal(1.1,1.2)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_floats_not_equal('a',1) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_floats_not_equal: first argument 'a' is not a number")
        self.assertEqual(self._outp[1][:8],'Line 165')
        self.clear()
        self._test.assert_floats_not_equal(1,'a') # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_floats_not_equal: second argument 'a' is not a number")
        self.assertEqual(self._outp[1][:8],'Line 171')
        self.clear()
        self._test.assert_floats_not_equal(1.0000001,1.0000002) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_floats_not_equal: expected something different from 1.0000001')
        self.assertEqual(self._outp[1][:8],'Line 177')
        self.clear()
        self._test.assert_float_lists_equal([2,1.0000001],(2,1.0000002))
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_equal('a',[1]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: first argument 'a' is not a sequence")
        self.assertEqual(self._outp[1][:8],'Line 187')
        self.clear()
        self._test.assert_float_lists_equal((1,),'a') # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: second argument 'a' is not a sequence")
        self.assertEqual(self._outp[1][:8],'Line 193')
        self.clear()
        self._test.assert_float_lists_equal((1,'a'),[2,1]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: first argument (1, 'a') has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 199')
        self.clear()
        self._test.assert_float_lists_equal([2,1],(1,'a')) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: second argument (1, 'a') has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 205')
        self.clear()
        self._test.assert_float_lists_equal([2],(2,1)) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: sequences [2] and (2, 1) have different sizes")
        self.assertEqual(self._outp[1][:8],'Line 211')
        self.clear()
        self._test.assert_float_lists_equal((2,1),[2]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: sequences (2, 1) and [2] have different sizes")
        self.assertEqual(self._outp[1][:8],'Line 217')
        self.clear()
        self._test.assert_float_lists_equal([1.1,2.1],[1.1,2.2]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_float_lists_equal: expected [1.1, 2.1] but instead got [1.1, 2.2]')
        self.assertEqual(self._outp[1][:8],'Line 223')
        self.clear()
        self._test.assert_float_lists_equal([[1,2],[3,4]],[[1,2],[3,4]])
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_equal([[1,2],[3,4]],[[1,2],[3,5]]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: expected [[1, 2], [3, 4]] but instead got [[1, 2], [3, 5]]")
        self.assertEqual(self._outp[1][:8],'Line 233')
        self.clear()
        self._test.assert_float_lists_equal([[1,2],[3,4]],[[1,2]]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: sequences [[1, 2], [3, 4]] and [[1, 2]] have different sizes")
        self.assertEqual(self._outp[1][:8],'Line 239')
        self.clear()
        self._test.assert_float_lists_equal([[1,2],[3,4]],[[1,2],[3,'a']]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: second argument [[1, 2], [3, 'a']] has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 245')
        self.clear()
        self._test.assert_float_lists_equal([[1,2],[3,'a']],[[1,2],[3,4]]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_equal: first argument [[1, 2], [3, 'a']] has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 251')
        self.clear()
        self._test.assert_float_lists_not_equal([1.1,2.1],(1.1,2.2))
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_not_equal('a',[1]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: first argument 'a' is not a sequence")
        self.assertEqual(self._outp[1][:8],'Line 261')
        self.clear()
        self._test.assert_float_lists_not_equal((1,),'a') # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: second argument 'a' is not a sequence")
        self.assertEqual(self._outp[1][:8],'Line 267')
        self.clear()
        self._test.assert_float_lists_not_equal((1,'a'),[2,1]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: first argument (1, 'a') has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 273')
        self.clear()
        self._test.assert_float_lists_not_equal([2,1],(1,'a')) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: second argument (1, 'a') has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 279')
        self.clear()
        self._test.assert_float_lists_not_equal([2],(2,1)) # Pay attention to the line number
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_not_equal((2,1),[2]) # Pay attention to the line number
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_not_equal([2,1.0000001],(2,1.0000002)) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),'assert_float_lists_not_equal: expected something different from [2, 1.0000001]')
        self.assertEqual(self._outp[1][:8],'Line 293')
        self.clear()
        self._test.assert_float_lists_not_equal([[1,2],[3,4]],[[1,2],[3,5]])
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_float_lists_not_equal([[1,2],[3,4]],[[1,2],[3,4]]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: expected something different from [[1, 2], [3, 4]]")
        self.assertEqual(self._outp[1][:8],'Line 303')
        self.clear()
        self._test.assert_float_lists_not_equal([[1,2],[3,4]],[[1,2],[3,'a']]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: second argument [[1, 2], [3, 'a']] has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 309')
        self.clear()
        self._test.assert_float_lists_not_equal([[1,2],[3,'a']],[[1,2],[3,4]]) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_float_lists_not_equal: first argument [[1, 2], [3, 'a']] has non-numeric values")
        self.assertEqual(self._outp[1][:8],'Line 315')
        self.clear()
    def test06_asserts_error(self):
        """
        Tests the enforcement assertion (assert_error).

        Uses three local fixtures: func1 crashes via assert, func2 via
        explicit TypeError/ValueError, func3 takes multiple arguments.
        Checks no-crash detection, wrong-exception detection, the optional
        ``error`` type, and the optional ``reason`` payload.  'Line NN'
        expectations are position-sensitive — do not move these calls.
        """
        # Basic enforcement
        def func1(s):
            assert type(s) == str
            assert s != ''
            return s[0]
        # Enforced with other errors
        def func2(s):
            if type(s) != str:
                raise TypeError()
            if s == '':
                raise ValueError(1,3)
            return s[0]
        # Multiple arguments
        def func3(x,y):
            assert type(x) == int, repr(x)+' is bad'
            assert type(y) == int
            return x/y
        self._test.assert_error(1,2) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: argument 1 is not callable")
        self.assertEqual(self._outp[1][:8],'Line 347')
        self.clear()
        self._test.assert_error(func1,'a') # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func1('a') did not crash but instead returned 'a'")
        self.assertEqual(self._outp[1][:8],'Line 353')
        self.clear()
        self._test.assert_error(func1,2)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func1,'')
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func2,'a')
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func2('a') did not crash but instead returned 'a'")
        self.assertEqual(self._outp[1][:8],'Line 367')
        self.clear()
        self._test.assert_error(func2,2)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func2(2) crashed with TypeError, not AssertionError")
        self.assertEqual(self._outp[1][:8],'Line 373')
        self.clear()
        self._test.assert_error(func2,2,error=TypeError)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func2,'',error=TypeError)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func2('') crashed with ValueError, not TypeError")
        self.assertEqual(self._outp[1][:8],'Line 383')
        self.clear()
        self._test.assert_error(func2,'',error=ValueError)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func3,3,2) # Pay attention to the line number
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func3(3, 2) did not crash but instead returned 1.5")
        self.assertEqual(self._outp[1][:8],'Line 393')
        self.clear()
        self._test.assert_error(func3,3.0,2)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func3,3.0,2,error=TypeError)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func3(3.0, 2) crashed with AssertionError, not TypeError")
        self.assertEqual(self._outp[1][:8],'Line 403')
        self.clear()
        self._test.assert_error(func3,3,2.0)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func3,3,0)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: call func3(3, 0) crashed with ZeroDivisionError, not AssertionError")
        self.assertEqual(self._outp[1][:8],'Line 413')
        self.clear()
        self._test.assert_error(func3,3,0,error=ZeroDivisionError)
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func2,2,error=TypeError,reason='a')
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: TypeError has no reason, but expected 'a'")
        self.assertEqual(self._outp[1][:8],'Line 423')
        self.clear()
        self._test.assert_error(func2,2,error=TypeError,reason=())
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func2,'',error=ValueError,reason=(1,3))
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func2,'',error=ValueError,reason='a')
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: ValueError has reason (1, 3), not 'a'")
        self.assertEqual(self._outp[1][:8],'Line 437')
        self.clear()
        self._test.assert_error(func3,'a',2,reason='a')
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),"assert_error: AssertionError has reason \"'a' is bad\", not 'a'")
        self.assertEqual(self._outp[1][:8],'Line 443')
        self.clear()
        self._test.assert_error(func3,True,2,reason='True is bad')
        self.assertFalse(self.isquit())
        self.clear()
        self._test.assert_error(func3,2,True,reason=())
        self.assertFalse(self.isquit())
        self.clear()
    def test07_messages(self):
        """
        Tests the custom assert messages.

        Every assert helper accepts a user-supplied message; each case
        triggers a failure and checks that the custom message replaces the
        default diagnostic text.  (Note: 'Test9' is reused for two cases —
        presumably a copy-paste slip in the labels, harmless to behavior.)
        """
        message = 'Test1'
        self._test.assert_equals(1, 2, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test2'
        self._test.assert_not_equals(1, 1, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test3'
        self._test.assert_true(False, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test4'
        self._test.assert_false(True, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test5'
        self._test.assert_floats_equal(1,1.001, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test6'
        self._test.assert_floats_not_equal(1,1.000001, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test7'
        self._test.assert_floats_not_equal(1,1.000001, message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test8'
        self._test.assert_float_lists_equal([1,2],[1.001,2], message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test9'
        self._test.assert_float_lists_not_equal([1,2],[1.000001,2], message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        def func3(x,y):
            assert type(x) == int, repr(x)+' is bad'
            assert type(y) == int
            return x/y
        message = 'Test9'
        self._test.assert_error(1,2,message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test10'
        self._test.assert_error(func3,3,2,message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test11'
        self._test.assert_error(func3,3,0,message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test12'
        self._test.assert_error(func3,True,0,error=TypeError,message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test13'
        self._test.assert_error(func3,True,0,reason=(),message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
        message = 'Test14'
        self._test.assert_error(func3,True,0,reason='a',message=message)
        self.assertTrue(self.isquit())
        self.assertEqual(self._outp[0].strip(),message)
        self.clear()
    def test01_checks(self):
        """
        Tests the type checks (isint / isfloat / isbool).

        These accept actual numbers as well as strings that parse as the
        given type; note isint(1.0) and isbool(1.0) are expected True.
        """
        self.assertTrue(self._test.isint(1.0))
        self.assertTrue(self._test.isint(1))
        self.assertTrue(self._test.isint('1'))
        self.assertFalse(self._test.isint('1.0'))
        self.assertFalse(self._test.isint('1e1'))
        self.assertFalse(self._test.isint('e1'))
        self.assertTrue(self._test.isfloat(1.0))
        self.assertTrue(self._test.isfloat(1))
        self.assertTrue(self._test.isfloat('1.0'))
        self.assertTrue(self._test.isfloat('1e1'))
        self.assertFalse(self._test.isfloat('e1'))
        self.assertTrue(self._test.isbool(True))
        self.assertTrue(self._test.isbool(1.0))
        self.assertTrue(self._test.isbool(1))
        self.assertTrue(self._test.isbool('True'))
        self.assertTrue(self._test.isbool('False'))
        self.assertFalse(self._test.isbool('true'))
        self.assertFalse(self._test.isbool('1'))
        self.assertFalse(self._test.isbool('1.0'))
        self.assertFalse(self._test.isbool('1e1'))
        self.assertFalse(self._test.isbool('e1'))
    def test02_compares(self):
        """
        Tests the float comparisons (isclose / allclose).

        isclose on sequences returns an element-wise (nested) list of
        booleans; allclose reduces the whole structure to one boolean.
        """
        self.assertTrue(self._test.isclose(1,1.000001))
        self.assertFalse(self._test.isclose(1,1.001))
        self.assertEqual(self._test.isclose( (1,2),(1.000001,2.001) ), [True,False])
        self.assertEqual(self._test.isclose( (1,2),(1.001,2.000001) ), [False,True])
        self.assertEqual(self._test.isclose( ((1,2),(3,4)), ((1,2.0000001),(5,4)) ),
                         [[True,True],[False,True]])
        self.assertEqual(self._test.allclose( (1,2),(1.000001,2.001) ),False)
        self.assertEqual(self._test.allclose( (1,2),(1.001,2.000001) ),False)
        self.assertEqual(self._test.allclose( (1,2),(1.000001,2.000001) ),True)
        self.assertEqual(self._test.allclose( ((1,2),(3,4)), ((1,2.0000001),(5,4)) ), False)
        self.assertEqual(self._test.allclose( ((1,2),(3,4)), ((1,2.0000001),(3,4)) ), True)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 40.927152
| 137
| 0.602427
| 3,190
| 24,720
| 4.506583
| 0.081505
| 0.062326
| 0.138773
| 0.155189
| 0.842168
| 0.816082
| 0.787841
| 0.738592
| 0.705829
| 0.655815
| 0
| 0.046297
| 0.246804
| 24,720
| 603
| 138
| 40.995025
| 0.725818
| 0.080947
| 0
| 0.429864
| 0
| 0.006787
| 0.145735
| 0.025923
| 0
| 0
| 0
| 0
| 0.665158
| 1
| 0.042986
| false
| 0
| 0.00905
| 0
| 0.067873
| 0.011312
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ceb71354700747f8520f3991f32c2e68bf2e4396
| 4,759
|
py
|
Python
|
PyP100/PyP110.py
|
seeya/TapoP100
|
2a82bbfbb8de18786c15be4732a89f48c94f4141
|
[
"MIT"
] | null | null | null |
PyP100/PyP110.py
|
seeya/TapoP100
|
2a82bbfbb8de18786c15be4732a89f48c94f4141
|
[
"MIT"
] | null | null | null |
PyP100/PyP110.py
|
seeya/TapoP100
|
2a82bbfbb8de18786c15be4732a89f48c94f4141
|
[
"MIT"
] | null | null | null |
from PyP100 import PyP100
import json
import logging
import time
_LOGGER = logging.getLogger(__name__)
class P110(PyP100.P100):
    """Client for the TP-Link Tapo P110 smart plug.

    Extends the P100 client with energy metering and countdown (delayed
    on/off) commands.  The session, cookie, token and cipher attributes
    used below are presumably initialized by the P100 base class — they
    are not defined in this file.
    """
def getEnergyUsage(self):
URL = f"http://{self.ipAddress}/app?token={self.token}"
Payload = {
"method": "get_energy_usage",
"requestTimeMils": int(round(time.time() * 1000)),
}
headers = {
"Cookie": self.cookie
}
EncryptedPayload = self.tpLinkCipher.encrypt(json.dumps(Payload))
SecurePassthroughPayload = {
"method":"securePassthrough",
"params":{
"request": EncryptedPayload
}
}
_LOGGER.debug("getEnergyUsage %s", self.ipAddress)
r = self.session.post(URL, json=SecurePassthroughPayload, headers=headers, timeout=2)
decryptedResponse = self.tpLinkCipher.decrypt(r.json()["result"]["response"])
return json.loads(decryptedResponse)
def turnOn(self):
URL = f"http://{self.ipAddress}/app?token={self.token}"
Payload = {
"method": "set_device_info",
"params":{
"device_on": True
},
"requestTimeMils": int(round(time.time() * 1000)),
"terminalUUID": "0A950402-7224-46EB-A450-7362CDB902A2"
}
headers = {
"Cookie": self.cookie
}
EncryptedPayload = self.tpLinkCipher.encrypt(json.dumps(Payload))
SecurePassthroughPayload = {
"method": "securePassthrough",
"params":{
"request": EncryptedPayload
}
}
r = self.session.post(URL, json=SecurePassthroughPayload, headers=headers, timeout=2)
decryptedResponse = self.tpLinkCipher.decrypt(r.json()["result"]["response"])
return json.loads(decryptedResponse)
def turnOff(self):
URL = f"http://{self.ipAddress}/app?token={self.token}"
Payload = {
"method": "set_device_info",
"params":{
"device_on": False
},
"requestTimeMils": int(round(time.time() * 1000)),
"terminalUUID": "0A950402-7224-46EB-A450-7362CDB902A2"
}
headers = {
"Cookie": self.cookie
}
EncryptedPayload = self.tpLinkCipher.encrypt(json.dumps(Payload))
SecurePassthroughPayload = {
"method": "securePassthrough",
"params":{
"request": EncryptedPayload
}
}
r = self.session.post(URL, json=SecurePassthroughPayload, headers=headers, timeout=2)
decryptedResponse = self.tpLinkCipher.decrypt(r.json()["result"]["response"])
return json.loads(decryptedResponse)
def turnOnWithDelay(self, delay):
URL = f"http://{self.ipAddress}/app?token={self.token}"
Payload = {
"method": "add_countdown_rule",
"params": {
"delay": int(delay),
"desired_states": {
"on": True
},
"enable": True,
"remain": int(delay)
},
"terminalUUID": "0A950402-7224-46EB-A450-7362CDB902A2"
}
headers = {
"Cookie": self.cookie
}
EncryptedPayload = self.tpLinkCipher.encrypt(json.dumps(Payload))
SecurePassthroughPayload = {
"method": "securePassthrough",
"params": {
"request": EncryptedPayload
}
}
r = self.session.post(URL, json=SecurePassthroughPayload, headers=headers)
decryptedResponse = self.tpLinkCipher.decrypt(r.json()["result"]["response"])
return decryptedResponse
def turnOffWithDelay(self, delay):
    """Schedule the device to turn off after ``delay`` seconds.

    Registers an ``add_countdown_rule`` on the device via the
    secure-passthrough tunnel.

    NOTE(review): unlike turnOn()/turnOff(), this returns the raw
    decrypted JSON *string* rather than a parsed dict; kept as-is for
    backward compatibility with existing callers.
    """
    URL = f"http://{self.ipAddress}/app?token={self.token}"
    Payload = {
        "method": "add_countdown_rule",
        "params": {
            "delay": int(delay),
            "desired_states": {
                "on": False
            },
            "enable": True,
            "remain": int(delay)
        },
        "terminalUUID": "0A950402-7224-46EB-A450-7362CDB902A2"
    }
    headers = {
        "Cookie": self.cookie
    }
    EncryptedPayload = self.tpLinkCipher.encrypt(json.dumps(Payload))
    SecurePassthroughPayload = {
        "method": "securePassthrough",
        "params": {
            "request": EncryptedPayload
        }
    }
    # timeout=2 added for consistency with turnOn()/turnOff(); without it an
    # unreachable device would hang this call indefinitely.
    r = self.session.post(URL, json=SecurePassthroughPayload, headers=headers, timeout=2)
    decryptedResponse = self.tpLinkCipher.decrypt(r.json()["result"]["response"])
    return decryptedResponse
| 29.018293
| 93
| 0.539609
| 379
| 4,759
| 6.722955
| 0.203166
| 0.062794
| 0.015699
| 0.023548
| 0.907771
| 0.907771
| 0.894035
| 0.894035
| 0.894035
| 0.894035
| 0
| 0.039735
| 0.333684
| 4,759
| 163
| 94
| 29.196319
| 0.763797
| 0
| 0
| 0.65873
| 0
| 0
| 0.206766
| 0.030258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039683
| false
| 0.119048
| 0.031746
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
490166dd8cb2244fb85cecfd652e7e73615184d0
| 18,705
|
py
|
Python
|
saleor/graphql/giftcard/tests/mutations/test_gift_card_create.py
|
felipearmat/saleor
|
34c01912fede74dae45edfd23c1bfdca8ad26e35
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/giftcard/tests/mutations/test_gift_card_create.py
|
felipearmat/saleor
|
34c01912fede74dae45edfd23c1bfdca8ad26e35
|
[
"CC-BY-4.0"
] | 101
|
2018-06-02T17:33:17.000Z
|
2022-03-28T04:46:22.000Z
|
saleor/graphql/giftcard/tests/mutations/test_gift_card_create.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | null | null | null |
from datetime import date, timedelta
from .....giftcard import GiftCardEvents
from .....giftcard.error_codes import GiftCardErrorCode
from ....core.enums import TimePeriodTypeEnum
from ....tests.utils import assert_no_permission, get_graphql_content
from ...enums import GiftCardExpiryTypeEnum
CREATE_GIFT_CARD_MUTATION = """
mutation giftCardCreate(
$balance: PriceInput!, $userEmail: String, $tag: String,
$expirySettings: GiftCardExpirySettingsInput!, $note: String
){
giftCardCreate(input: {
balance: $balance, userEmail: $userEmail, tag: $tag,
expirySettings: $expirySettings, note: $note }) {
giftCard {
id
code
displayCode
isActive
expiryDate
expiryType
expiryPeriod {
amount
type
}
tag
created
lastUsedOn
initialBalance {
currency
amount
}
currentBalance {
currency
amount
}
createdBy {
email
}
usedBy {
email
}
createdByEmail
usedByEmail
app {
name
}
product {
name
}
events {
type
user {
email
}
app {
name
}
balance {
initialBalance {
amount
currency
}
oldInitialBalance {
amount
currency
}
currentBalance {
amount
currency
}
oldCurrentBalance {
amount
currency
}
}
}
}
errors {
field
message
code
}
}
}
"""
def test_create_never_expiry_gift_card(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """A fully-permissioned staff user can create a never-expiring gift card."""
    # given
    amount = 100
    currency = "USD"
    expiry_type = GiftCardExpiryTypeEnum.NEVER_EXPIRE.name
    tag = "gift-card-tag"
    variables = {
        "balance": {"amount": amount, "currency": currency},
        "userEmail": customer_user.email,
        "tag": tag,
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {"expiryType": expiry_type},
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["errors"]
    card = result["giftCard"]
    assert card["code"]
    assert card["displayCode"]
    assert card["expiryType"] == expiry_type.upper()
    assert not card["expiryDate"]
    assert not card["expiryPeriod"]
    assert card["tag"] == tag
    assert card["createdBy"]["email"] == staff_api_client.user.email
    assert card["createdByEmail"] == staff_api_client.user.email
    assert not card["usedBy"]
    assert not card["usedByEmail"]
    assert not card["app"]
    assert not card["lastUsedOn"]
    assert card["isActive"]
    assert card["initialBalance"]["amount"] == amount
    assert card["currentBalance"]["amount"] == amount
    # Exactly one ISSUED event, attributed to the staff user.
    assert len(card["events"]) == 1
    event = card["events"][0]
    assert event["type"] == GiftCardEvents.ISSUED.upper()
    assert event["user"]["email"] == staff_api_client.user.email
    assert not event["app"]
    event_balance = event["balance"]
    assert event_balance["initialBalance"]["amount"] == amount
    assert event_balance["initialBalance"]["currency"] == currency
    assert event_balance["currentBalance"]["amount"] == amount
    assert event_balance["currentBalance"]["currency"] == currency
    assert not event_balance["oldInitialBalance"]
    assert not event_balance["oldCurrentBalance"]
def test_create_gift_card_by_app(
    app_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
):
    """An app (not a staff user) can create a gift card; events credit the app."""
    # given
    amount = 100
    currency = "USD"
    expiry_type = GiftCardExpiryTypeEnum.NEVER_EXPIRE.name
    tag = "gift-card-tag"
    variables = {
        "balance": {"amount": amount, "currency": currency},
        "userEmail": customer_user.email,
        "tag": tag,
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {"expiryType": expiry_type, "expiryDate": None},
    }

    # when
    response = app_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[permission_manage_gift_card, permission_manage_users],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["errors"]
    card = result["giftCard"]
    assert card["code"]
    assert card["displayCode"]
    assert card["expiryType"] == expiry_type.upper()
    assert not card["expiryDate"]
    assert not card["expiryPeriod"]
    assert card["tag"] == tag
    # Created by an app, so the user attribution fields stay empty.
    assert not card["createdBy"]
    assert not card["createdByEmail"]
    assert not card["usedBy"]
    assert not card["usedByEmail"]
    assert card["app"]["name"] == app_api_client.app.name
    assert not card["lastUsedOn"]
    assert card["isActive"]
    assert card["initialBalance"]["amount"] == amount
    assert card["currentBalance"]["amount"] == amount
    assert len(card["events"]) == 1
    event = card["events"][0]
    assert event["type"] == GiftCardEvents.ISSUED.upper()
    assert not event["user"]
    assert event["app"]["name"] == app_api_client.app.name
    event_balance = event["balance"]
    assert event_balance["initialBalance"]["amount"] == amount
    assert event_balance["initialBalance"]["currency"] == currency
    assert event_balance["currentBalance"]["amount"] == amount
    assert event_balance["currentBalance"]["currency"] == currency
    assert not event_balance["oldInitialBalance"]
    assert not event_balance["oldCurrentBalance"]
def test_create_gift_card_by_customer(api_client, customer_user):
    """An anonymous/customer client must be denied the mutation entirely."""
    # given
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.NEVER_EXPIRE.name,
            "expiryDate": None,
        },
    }

    # when
    response = api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
    )

    # then
    assert_no_permission(response)
def test_create_gift_card_no_premissions(staff_api_client):
    """A staff user without the gift-card permissions is denied.

    NOTE(review): "premissions" is a typo in the test name; renaming would
    change pytest collection IDs, so it is left untouched.
    """
    # given
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.NEVER_EXPIRE.name,
            "expiryDate": None,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
    )

    # then
    assert_no_permission(response)
def test_create_gift_card_with_too_many_decimal_places_in_balance_amount(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """A balance with more decimal places than the currency allows is rejected."""
    # given
    invalid_amount = 10.123  # USD supports two decimal places
    variables = {
        "balance": {"amount": invalid_amount, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.NEVER_EXPIRE.name,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["giftCard"]
    errors = result["errors"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "balance"
    assert error["code"] == GiftCardErrorCode.INVALID.name
def test_create_gift_card_with_expiry_date(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """EXPIRY_DATE type with a future date creates the card with that date."""
    # given
    expiry_type = GiftCardExpiryTypeEnum.EXPIRY_DATE.name
    expiry_date = date.today() + timedelta(days=365)
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": expiry_type,
            "expiryDate": expiry_date,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["errors"]
    card = result["giftCard"]
    assert card["code"]
    assert card["displayCode"]
    assert card["expiryType"] == expiry_type.upper()
    assert card["expiryDate"] == expiry_date.isoformat()
    assert not card["expiryPeriod"]
    assert len(card["events"]) == 1
    event = card["events"][0]
    assert event["type"] == GiftCardEvents.ISSUED.upper()
    assert event["user"]["email"] == staff_api_client.user.email
def test_create_gift_card_with_expiry_date_type_date_not_given(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """EXPIRY_DATE type without an expiryDate yields a REQUIRED error."""
    # given
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.EXPIRY_DATE.name,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["giftCard"]
    errors = result["errors"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "expiryDate"
    assert error["code"] == GiftCardErrorCode.REQUIRED.name
def test_create_gift_card_with_expiry_date_type_date_in_past(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """EXPIRY_DATE type with a past date yields an INVALID error."""
    # given
    past_date = date(1999, 1, 1)
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.EXPIRY_DATE.name,
            "expiryDate": past_date,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["giftCard"]
    errors = result["errors"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "expiryDate"
    assert error["code"] == GiftCardErrorCode.INVALID.name
def test_create_gift_card_with_expiry_period(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """EXPIRY_PERIOD type with a valid period creates the card with that period."""
    # given
    expiry_type = GiftCardExpiryTypeEnum.EXPIRY_PERIOD.name
    period_amount = 10
    period_type = TimePeriodTypeEnum.MONTH.name
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": expiry_type,
            "expiryPeriod": {"type": period_type, "amount": period_amount},
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["errors"]
    card = result["giftCard"]
    assert card["code"]
    assert card["displayCode"]
    assert card["expiryType"] == expiry_type.upper()
    assert not card["expiryDate"]
    assert card["expiryPeriod"]["amount"] == period_amount
    assert card["expiryPeriod"]["type"] == period_type
    assert len(card["events"]) == 1
    event = card["events"][0]
    assert event["type"] == GiftCardEvents.ISSUED.upper()
    assert event["user"]["email"] == staff_api_client.user.email
def test_create_gift_card_with_expiry_period_negative_amount(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """A negative expiry-period amount yields an INVALID error."""
    # given
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.EXPIRY_PERIOD.name,
            "expiryPeriod": {
                "type": TimePeriodTypeEnum.MONTH.name,
                "amount": -10,
            },
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["giftCard"]
    errors = result["errors"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "expiryPeriod"
    assert error["code"] == GiftCardErrorCode.INVALID.name
def test_create_gift_card_with_expiry_period_type_period_data_not_given(
    staff_api_client,
    customer_user,
    permission_manage_gift_card,
    permission_manage_users,
    permission_manage_apps,
):
    """EXPIRY_PERIOD type without period data yields a REQUIRED error."""
    # given
    variables = {
        "balance": {"amount": 100, "currency": "USD"},
        "userEmail": customer_user.email,
        "tag": "gift-card-tag",
        "note": "This is gift card note that will be save in gift card event.",
        "expirySettings": {
            "expiryType": GiftCardExpiryTypeEnum.EXPIRY_PERIOD.name,
        },
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_GIFT_CARD_MUTATION,
        variables,
        permissions=[
            permission_manage_gift_card,
            permission_manage_users,
            permission_manage_apps,
        ],
    )

    # then
    result = get_graphql_content(response)["data"]["giftCardCreate"]
    assert not result["giftCard"]
    errors = result["errors"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "expiryPeriod"
    assert error["code"] == GiftCardErrorCode.REQUIRED.name
| 28.955108
| 79
| 0.586848
| 1,780
| 18,705
| 5.92191
| 0.069663
| 0.056162
| 0.030547
| 0.040983
| 0.879328
| 0.876957
| 0.876577
| 0.868419
| 0.862916
| 0.85286
| 0
| 0.005492
| 0.308848
| 18,705
| 645
| 80
| 29
| 0.80987
| 0.009356
| 0
| 0.754069
| 0
| 0
| 0.280493
| 0.002703
| 0
| 0
| 0
| 0
| 0.169982
| 1
| 0.019892
| false
| 0
| 0.01085
| 0
| 0.030741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b453cdd71bd5d3851a13fb669731ca007702389
| 17,274
|
py
|
Python
|
tests/request/django_2x/test_patched_request.py
|
zlqm/django_improved_view
|
642e5fdc5ebfab5d73fc2cdc67c4a916b239b479
|
[
"BSD-3-Clause"
] | 1
|
2021-02-10T08:26:30.000Z
|
2021-02-10T08:26:30.000Z
|
tests/request/django_2x/test_patched_request.py
|
zlqm/simple_django_api
|
642e5fdc5ebfab5d73fc2cdc67c4a916b239b479
|
[
"BSD-3-Clause"
] | null | null | null |
tests/request/django_2x/test_patched_request.py
|
zlqm/simple_django_api
|
642e5fdc5ebfab5d73fc2cdc67c4a916b239b479
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from io import BytesIO
from django.http import (RawPostDataException, UnreadablePostError)
from django.test import SimpleTestCase
from django.test.client import FakePayload
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import urlencode as original_urlencode
from simple_django_api.request import WSGIRequest
HTTP_METHODS_WITH_BODY = ('POST', 'PUT', 'PATCH')
class OriginRequestTest(SimpleTestCase):
    """Baseline semantics of ``request.body`` / ``request.POST`` / stream reads.

    Each test builds a ``WSGIRequest`` directly from a WSGI environ dict and
    probes the interaction between reading the raw stream, accessing ``body``,
    and parsing ``POST``.  The *order* of those accesses is the behavior under
    test, so statement order here is load-bearing.

    NOTE(review): a few tests hard-code ``'REQUEST_METHOD': 'POST'`` inside the
    method loop, leaving the loop variable unused — confirm whether PUT/PATCH
    were meant to be exercised there as well.
    """

    def test_read_after_value(self):
        """
        Reading from request is allowed after accessing request contents as
        POST or body.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=value')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(request.POST, {'name': ['value']})
            self.assertEqual(request.body, b'name=value')
            self.assertEqual(request.read(), b'name=value')

    def test_value_after_read(self):
        """
        Construction of POST or body is not allowed after reading
        from request.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=value')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            # Consuming part of the stream first poisons later body access.
            self.assertEqual(request.read(2), b'na')
            with self.assertRaises(RawPostDataException):
                request.body
            self.assertEqual(request.POST, {})

    def test_non_ascii_POST(self):
        # Non-ASCII form values must survive the default (UTF-8) decode.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload(urlencode({'key': 'España'}))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_LENGTH': len(payload),
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'wsgi.input': payload,
            })
            self.assertEqual(request.POST, {'key': ['España']})

    def test_alternate_charset_POST(self):
        """
        Test a POST with non-utf-8 payload encoding.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload(
                original_urlencode({'key': 'España'.encode('latin-1')}))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_LENGTH': len(payload),
                'CONTENT_TYPE':
                'application/x-www-form-urlencoded; charset=iso-8859-1',
                'wsgi.input': payload,
            })
            self.assertEqual(request.POST, {'key': ['España']})

    def test_body_after_POST_multipart_form_data(self):
        """
        Reading body after parsing multipart/form-data is not allowed
        """
        # Because multipart is used for large amounts of data i.e. file uploads,
        # we don't want the data held in memory twice, and we don't want to
        # silence the error by setting body = '' either.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload("\r\n".join([
                '--boundary', 'Content-Disposition: form-data; name="name"',
                '', 'value', '--boundary--'
                ''
            ]))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(request.POST, {'name': ['value']})
            with self.assertRaises(RawPostDataException):
                request.body

    def test_body_after_POST_multipart_related(self):
        """
        Reading body after parsing multipart that isn't form-data is allowed
        """
        # Ticket #9054
        # There are cases in which the multipart data is related instead of
        # being a binary upload, in which case it should still be accessible
        # via body.
        for method in HTTP_METHODS_WITH_BODY:
            payload_data = b"\r\n".join([
                b'--boundary', b'Content-ID: id; name="name"', b'', b'value',
                b'--boundary--'
                b''
            ])
            payload = FakePayload(payload_data)
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/related; boundary=boundary',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(request.POST, {})
            self.assertEqual(request.body, payload_data)

    def test_POST_multipart_with_content_length_zero(self):
        """
        Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
        """
        # According to:
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
        # Every request.POST with Content-Length >= 0 is a valid request,
        # this test ensures that we handle Content-Length == 0.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload("\r\n".join([
                '--boundary', 'Content-Disposition: form-data; name="name"',
                '', 'value', '--boundary--'
                ''
            ]))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                'CONTENT_LENGTH': 0,
                'wsgi.input': payload
            })
            self.assertEqual(request.POST, {})

    def test_POST_binary_only(self):
        # Binary payloads (octet-stream or missing content type) must leave
        # POST/FILES empty while keeping the raw body readable.
        for method in HTTP_METHODS_WITH_BODY:
            payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
            environ = {
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/octet-stream',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': BytesIO(payload)
            }
            request = WSGIRequest(environ)
            self.assertEqual(request.POST, {})
            self.assertEqual(request.FILES, {})
            self.assertEqual(request.body, payload)
            # Same test without specifying content-type
            environ.update({
                'CONTENT_TYPE': '',
                'wsgi.input': BytesIO(payload)
            })
            request = WSGIRequest(environ)
            self.assertEqual(request.POST, {})
            self.assertEqual(request.FILES, {})
            self.assertEqual(request.body, payload)

    def test_read_by_lines(self):
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=value')
            request = WSGIRequest({
                # NOTE(review): 'POST' literal — loop variable `method` unused.
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(list(request), [b'name=value'])

    def test_POST_after_body_read(self):
        """
        POST should be populated even if body is read first
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=value')
            request = WSGIRequest({
                # NOTE(review): 'POST' literal — loop variable `method` unused.
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            request.body  # evaluate
            self.assertEqual(request.POST, {'name': ['value']})

    def test_POST_after_body_read_and_stream_read(self):
        """
        POST should be populated even if body is read first, and then
        the stream is read second.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=value')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            request.body  # evaluate
            self.assertEqual(request.read(1), b'n')
            self.assertEqual(request.POST, {'name': ['value']})

    def test_POST_after_body_read_and_stream_read_multipart(self):
        """
        POST should be populated even if body is read first, and then
        the stream is read second. Using multipart/form-data instead of urlencoded.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload("\r\n".join([
                '--boundary', 'Content-Disposition: form-data; name="name"',
                '', 'value', '--boundary--'
                ''
            ]))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            request.body  # evaluate
            # Consume enough data to mess up the parsing:
            self.assertEqual(request.read(13), b'--boundary\r\nC')
            self.assertEqual(request.POST, {'name': ['value']})

    def test_POST_immutable_for_mutipart(self):
        """
        MultiPartParser.parse() leaves request.POST immutable.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload("\r\n".join([
                '--boundary',
                'Content-Disposition: form-data; name="name"',
                '',
                'value',
                '--boundary--',
            ]))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload,
            })
            self.assertFalse(request.POST._mutable)

    def test_POST_connection_error(self):
        """
        If wsgi.input.read() raises an exception while trying to read() the
        POST, the exception should be identifiable (not a generic IOError).
        """
        class ExplodingBytesIO(BytesIO):
            def read(self, len=0):
                raise IOError("kaboom!")

        for method in HTTP_METHODS_WITH_BODY:
            payload = b'name=value'
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': ExplodingBytesIO(payload)
            })
            with self.assertRaises(UnreadablePostError):
                request.body

    def test_set_encoding_clears_POST(self):
        # Re-assigning request.encoding must drop the cached POST and
        # re-decode the raw bytes with the new charset.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('name=Hello Günter')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload,
            })
            self.assertEqual(request.POST, {'name': ['Hello Günter']})
            request.encoding = 'iso-8859-16'
            self.assertEqual(request.POST, {'name': ['Hello GĂŒnter']})

    def test_FILES_connection_error(self):
        """
        If wsgi.input.read() raises an exception while trying to read() the
        FILES, the exception should be identifiable (not a generic IOError).
        """
        class ExplodingBytesIO(BytesIO):
            def read(self, len=0):
                raise IOError("kaboom!")

        for method in HTTP_METHODS_WITH_BODY:
            payload = b'x'
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': ExplodingBytesIO(payload)
            })
            with self.assertRaises(UnreadablePostError):
                request.FILES
class PatchedRequestsTests(SimpleTestCase):
    """Same read-ordering semantics, but for the patched ``request.data``.

    The patched ``WSGIRequest`` exposes JSON bodies via ``request.data``;
    these tests mirror ``OriginRequestTest`` with ``application/json``
    payloads.  Access order (stream read vs. ``body`` vs. ``data``) is the
    behavior under test, so statement order is load-bearing.

    NOTE(review): several tests hard-code ``'REQUEST_METHOD': 'POST'``
    inside the method loop, leaving the loop variable unused — confirm
    whether PUT/PATCH were meant to be exercised there too.
    """

    def test_read_after_value(self):
        """
        Reading from request is allowed after accessing request contents as
        data or body.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "value"}')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(request.data, {'name': 'value'})
            self.assertEqual(request.body, b'{"name": "value"}')
            self.assertEqual(request.read(), b'{"name": "value"}')

    def test_value_after_read(self):
        """
        Construction of POST or body is not allowed after reading
        from request.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "value"}')
            request = WSGIRequest({
                # NOTE(review): 'POST' literal — loop variable `method` unused.
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            self.assertEqual(request.read(2), b'{"')
            with self.assertRaises(RawPostDataException):
                request.body
            self.assertEqual(request.data, {})

    def test_non_ascii_POST(self):
        # Non-ASCII JSON values must survive the default (UTF-8) decode.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "佚名"}')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_LENGTH': len(payload),
                'CONTENT_TYPE': 'application/json',
                'wsgi.input': payload,
            })
            self.assertEqual(request.data, {'name': '佚名'})

    def test_alternate_charset_POST(self):
        """
        Test a POST with non-utf-8 payload encoding.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "佚名"}'.encode('GBK'))
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_LENGTH': len(payload),
                'CONTENT_TYPE': 'application/json; charset=GBK',
                'wsgi.input': payload,
            })
            self.assertEqual(request.data, {'name': '佚名'})

    def test_POST_after_body_read(self):
        """
        POST should be populated even if body is read first
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "佚名"}')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            request.body  # evaluate
            self.assertEqual(request.data, {'name': '佚名'})

    def test_POST_after_body_read_and_stream_read(self):
        """
        POST should be populated even if body is read first, and then
        the stream is read second.
        """
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "佚名"}')
            request = WSGIRequest({
                # NOTE(review): 'POST' literal — loop variable `method` unused.
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload
            })
            request.body  # evaluate
            self.assertEqual(request.read(1), b'{')
            self.assertEqual(request.POST, {'name': '佚名'})
            self.assertEqual(request.data, {'name': '佚名'})

    def test_POST_connection_error(self):
        """
        If wsgi.input.read() raises an exception while trying to read() the
        POST, the exception should be identifiable (not a generic IOError).
        """
        class ExplodingBytesIO(BytesIO):
            def read(self, len=0):
                raise IOError("kaboom!")

        for method in HTTP_METHODS_WITH_BODY:
            payload = '{"name": "佚名"}'.encode('utf8')
            request = WSGIRequest({
                'REQUEST_METHOD': method,
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': ExplodingBytesIO(payload)
            })
            with self.assertRaises(UnreadablePostError):
                request.body

    def test_set_encoding_clears_POST(self):
        # Re-assigning request.encoding must drop the cached data/POST and
        # re-decode the raw bytes with the new charset.
        for method in HTTP_METHODS_WITH_BODY:
            payload = FakePayload('{"name": "佚名"}')
            request = WSGIRequest({
                # NOTE(review): 'POST' literal — loop variable `method` unused.
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': 'application/json',
                'CONTENT_LENGTH': len(payload),
                'wsgi.input': payload,
            })
            self.assertEqual(request.data, {'name': '佚名'})
            request.encoding = 'GBK'
            self.assertEqual(request.POST, {'name': '浣氬悕'})
| 39.259091
| 90
| 0.550828
| 1,710
| 17,274
| 5.412281
| 0.125731
| 0.061588
| 0.087952
| 0.051324
| 0.819125
| 0.811345
| 0.780119
| 0.773204
| 0.766072
| 0.732685
| 0
| 0.005141
| 0.335649
| 17,274
| 439
| 91
| 39.348519
| 0.801325
| 0.125101
| 0
| 0.781538
| 0
| 0.003077
| 0.195082
| 0.024796
| 0
| 0
| 0
| 0
| 0.138462
| 1
| 0.083077
| false
| 0
| 0.024615
| 0
| 0.123077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b59e35147da1e0e53e044bb8b7b8b670a4b5cd5
| 21,320
|
py
|
Python
|
test/test_index_calculation.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 10
|
2018-01-17T19:17:17.000Z
|
2019-02-19T02:25:36.000Z
|
test/test_index_calculation.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 4
|
2018-04-20T08:45:39.000Z
|
2018-11-28T18:13:15.000Z
|
test/test_index_calculation.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 3
|
2018-04-20T08:36:12.000Z
|
2018-11-20T16:31:55.000Z
|
# type: ignore
"""
test_index_calculation.py
Tests for Tokenizer module
Copyright (C) 2022 by Miðeind ehf.
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This module tests the token index generation of the tokenizer.
"""
import tokenizer
Tok = tokenizer.Tok
TOK = tokenizer.TOK
ACCENT = chr(769)
UMLAUT = chr(776)
EM_DASH = "\u2014"
def test_small_easy_cases() -> None:
    """Char/byte index calculation for short strings of ASCII and
    two-byte UTF-8 letters, with and without the trailing end index."""

    def indexes(text, last_is_end=False):
        # calculate_indexes() consumes the token stream, so a fresh
        # parse is needed for every call.
        toks = tokenizer.parse_tokens([text])
        return tokenizer.calculate_indexes(toks, last_is_end=last_is_end)

    # Pure ASCII: char and byte indexes coincide.
    chars, bytes_ = indexes("Bara ASCII.")
    assert chars == [0, 4, 10]
    assert bytes_ == [0, 4, 10]
    chars, bytes_ = indexes("Bara ASCII.", last_is_end=True)
    assert chars == [0, 4, 10, 11]
    assert bytes_ == [0, 4, 10, 11]

    # "Á" and "á" are two bytes each in UTF-8, so the byte indexes
    # run ahead of the character indexes.
    chars, bytes_ = indexes("Á bát.")
    assert chars == [0, 1, 5]
    assert bytes_ == [0, 2, 7]
    chars, bytes_ = indexes("Á bát.", last_is_end=True)
    assert chars == [0, 1, 5, 6]
    assert bytes_ == [0, 2, 7, 8]

    # Text that ends in a two-byte letter ("ö"): the final end index
    # differs between characters (9) and bytes (11).
    chars, bytes_ = indexes("endar á ö")
    assert chars == [0, 5, 7]
    assert bytes_ == [0, 5, 8]
    chars, bytes_ = indexes("endar á ö", last_is_end=True)
    assert chars == [0, 5, 7, 9]
    assert bytes_ == [0, 5, 8, 11]
def test_small_difficult_cases() -> None:
    """Edge cases for index calculation: empty input, whitespace-only
    input, and single tokens built from 1-byte, 2-byte, combining
    (two-codepoint) and 3-byte (em-dash) characters, each with varying
    amounts of surrounding whitespace.

    NOTE(review): several ``" " + x`` literals below look
    whitespace-collapsed in transit — the expected end index (3 rather
    than 2) implies TWO leading spaces. Verify the literals against the
    upstream repository before editing them.
    """
    # Empty input: no tokens, hence no indexes; with last_is_end we
    # still get a single terminating index 0.
    s = ""
    toks = tokenizer.parse_tokens([s])
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
    assert char_indexes == []
    assert byte_indexes == []
    toks = tokenizer.parse_tokens([s])
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
    assert char_indexes == [0]
    assert byte_indexes == [0]
    # Whitespace-only input.
    s = " "
    toks = tokenizer.parse_tokens([s])
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
    assert char_indexes == [0]
    assert byte_indexes == [0]
    toks = tokenizer.parse_tokens([s])
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
    assert char_indexes == [0, 1]
    assert byte_indexes == [0, 1]
    # Single byte characters
    for x in ["a", "A", ".", "?", "!"]:
        # Bare token: char and byte indexes coincide.
        s = x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 1]
        assert byte_indexes == [0, 1]
        # One leading space (end index 2).
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 2]
        # End index 3 implies two leading spaces here — see docstring NOTE.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 3]
        # Leading and trailing whitespace.
        s = " " + x + " "
        # example:
        # " a "
        # 0123
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 3]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3, 4]
        assert byte_indexes == [0, 3, 4]
        # Two tokens separated by whitespace.
        s = " " + x + " " + x
        # example:
        # " a a"
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 2]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2, 4]
        assert byte_indexes == [0, 2, 4]
    # Two byte characters
    for x in ["þ", "æ", "á"]:
        # Bare token: one char but two bytes.
        s = x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0], s
        assert byte_indexes == [0], s
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 1], s
        assert byte_indexes == [0, 2], s
        # One leading space.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 3]
        # End index 3/4 implies two leading spaces — see docstring NOTE.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 4]
        # Leading and trailing whitespace ("_" marks the second UTF-8 byte).
        s = " " + x + " "
        # example bytes:
        # " þ_ "
        # 01234
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 4]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3, 4]
        assert byte_indexes == [0, 4, 5]
        # Two two-byte tokens.
        s = " " + x + " " + x
        # example bytes:
        # " þ_ þ_"
        # 012345
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 3]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2, 4]
        assert byte_indexes == [0, 3, 6]
    # Two character characters
    # These strings contain two unicode code points that are rendered as one letter.
    # They are counted as two characters in python.
    # In addition the accent and umlaut characters are two bytes.
    for x in ["a"+ACCENT, "o"+UMLAUT]:
        # Bare token: two chars, three bytes.
        s = x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0], s
        assert byte_indexes == [0], s
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2], s
        assert byte_indexes == [0, 3], s
        # One leading space.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 4]
        # End index 4/5 implies two leading spaces — see docstring NOTE.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 4]
        assert byte_indexes == [0, 5]
        # Leading and trailing whitespace.
        s = " " + x + " "
        # example chars:
        # " a´ "
        # 01234
        # ^ ^^
        # example bytes:
        # " a´_ "
        # 012345
        # ^ ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 4]
        assert byte_indexes == [0, 5]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 4, 5]
        assert byte_indexes == [0, 5, 6]
        # Two combining-character tokens.
        s = " " + x + " " + x
        # example chars:
        # " a´ a´"
        # 012345
        # ^ ^
        # example bytes:
        # " a´_ a´_"
        # 01234567
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 4]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3, 6]
        assert byte_indexes == [0, 4, 8]
    # The em-dash is 3 bytes
    for x in [EM_DASH]:
        # Bare token: one char, three bytes.
        s = x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0], s
        assert byte_indexes == [0], s
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 1], s
        assert byte_indexes == [0, 3], s
        # One leading space.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 4]
        # End index 3/5 implies two leading spaces — see docstring NOTE.
        s = " " + x
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0]
        assert byte_indexes == [0]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 5]
        # Leading and trailing whitespace ("__" marks the 2nd/3rd bytes).
        s = " " + x + " "
        # example chars:
        # " a "
        # 0123
        # ^ ^
        # example bytes:
        # " a__ "
        # 012345
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 3]
        assert byte_indexes == [0, 5]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 3, 4]
        assert byte_indexes == [0, 5, 6]
        # Two em-dash tokens.
        s = " " + x + " " + x
        # example chars:
        # " a a"
        # 0123
        # ^ ^
        # example bytes:
        # " a__ a__"
        # 01234567
        # ^ ^
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
        assert char_indexes == [0, 2]
        assert byte_indexes == [0, 4]
        toks = tokenizer.parse_tokens([s])
        char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
        assert char_indexes == [0, 2, 4]
        assert byte_indexes == [0, 4, 8]
def test_larger_case() -> None:
    """One full sentence with a mix of one- and two-byte letters; the
    byte indexes drift ahead of the char indexes by one per 2-byte letter."""
    sentence = "Þessi setning er í lengra lagi og er með bæði eins og tveggja bæta stafi."
    expected_chars = [0, 5, 13, 16, 18, 25, 30, 33, 36, 40, 45, 50, 53, 61, 66, 72]
    expected_bytes = [0, 6, 14, 17, 20, 27, 32, 35, 38, 43, 50, 55, 58, 66, 72, 78]

    chars, byte_ix = tokenizer.calculate_indexes(tokenizer.parse_tokens([sentence]))
    assert chars == expected_chars
    assert byte_ix == expected_bytes

    # With last_is_end=True the total lengths (73 chars / 79 bytes)
    # are appended as a final index.
    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.parse_tokens([sentence]), last_is_end=True
    )
    assert chars == expected_chars + [73]
    assert byte_ix == expected_bytes + [79]
def test_iterator_cases() -> None:
    """Index calculation over the streaming interface: the input is a
    list of text fragments rather than a single string.

    NOTE(review): the literals in the third sub-case look
    whitespace-collapsed in transit (the expected indexes imply more
    whitespace than shown); verify against the upstream file before
    editing them.
    """
    # Same sentence as in test_larger_case, split into word fragments;
    # the indexes must be identical to the single-string case.
    s = ["Þessi ", "setning ", "er ", "í ", "lengra ", "lagi ", "og ", "er ", "með ", "bæði ", "eins ", "og ", "tveggja ", "bæta ", "stafi."]
    # (char and byte indexes in a similar test above)
    toks = tokenizer.parse_tokens(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
    assert char_indexes == [0, 5, 13, 16, 18, 25, 30, 33, 36, 40, 45, 50, 53, 61, 66, 72]
    assert byte_indexes == [0, 6, 14, 17, 20, 27, 32, 35, 38, 43, 50, 55, 58, 66, 72, 78]
    toks = tokenizer.parse_tokens(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
    assert char_indexes == [0, 5, 13, 16, 18, 25, 30, 33, 36, 40, 45, 50, 53, 61, 66, 72, 73]
    assert byte_indexes == [0, 6, 14, 17, 20, 27, 32, 35, 38, 43, 50, 55, 58, 66, 72, 78, 79]
    # An empty fragment between two sentences must not shift the indexes.
    s = ["Stutt setning.", "", "Önnur setning."]
    # 01234567890123 45678901234567
    # ^ ^ ^ ^ ^ ^
    # x
    toks = tokenizer.parse_tokens(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
    assert char_indexes == [0, 5, 13, 14, 19, 27]
    assert byte_indexes == [0, 5, 13, 14, 20, 28]
    toks = tokenizer.parse_tokens(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
    assert char_indexes == [0, 5, 13, 14, 19, 27, 28]
    assert byte_indexes == [0, 5, 13, 14, 20, 28, 29]
    # parse_tokens does some implentation-detail-stuff here. Use tokenize instead.
    s = [" Stutt setning. ", "\n \n", "Önnur setning."]
    # 0123456789012345 6 78 90123456789012
    # ^ ^ ^^ ^ ^
    # x
    toks = tokenizer.tokenize(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks)
    assert char_indexes == [0, 6, 14, 15, 24, 32]
    assert byte_indexes == [0, 6, 14, 15, 25, 33]
    toks = tokenizer.tokenize(s)
    char_indexes, byte_indexes = tokenizer.calculate_indexes(toks, last_is_end=True)
    assert char_indexes == [0, 6, 14, 15, 24, 32, 33]
    assert byte_indexes == [0, 6, 14, 15, 25, 33, 34]
def test_paragraph_markers() -> None:
    """Token indexes around ``[[`` / ``]]`` paragraph markers, both for
    the raw token stream and for the tokenize() pipeline, which drops
    empty marker pairs (their text disappears from the index sequence)."""
    text = "[[Stutt setning.]][[]][[Önnur setning.]]"

    # Raw parsing keeps every marker as a token, including the empty [[]].
    raw_chars = [0, 2, 7, 15, 16, 18, 20, 22, 24, 29, 37, 38]
    raw_bytes = [0, 2, 7, 15, 16, 18, 20, 22, 24, 30, 38, 39]
    chars, byte_ix = tokenizer.calculate_indexes(tokenizer.parse_tokens(text))
    assert chars == raw_chars
    assert byte_ix == raw_bytes
    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.parse_tokens(text), last_is_end=True
    )
    assert chars == raw_chars + [40]
    assert byte_ix == raw_bytes + [41]

    # The tokenize() pipeline removes the dropped empty paragraph
    # markers; the indexes must still be correct afterwards.
    cooked_chars = [0, 2, 7, 15, 16, 18, 24, 29, 37, 38]
    cooked_bytes = [0, 2, 7, 15, 16, 18, 24, 30, 38, 39]
    chars, byte_ix = tokenizer.calculate_indexes(tokenizer.tokenize(text))
    assert chars == cooked_chars
    assert byte_ix == cooked_bytes
    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.tokenize(text), last_is_end=True
    )
    assert chars == cooked_chars + [40]
    assert byte_ix == cooked_bytes + [41]
def test_composite_phrases() -> None:
    """Indexes for a composite phrase ("Orða- og tengingasetning"):
    parse_tokens keeps the parts separate, tokenize() merges the whole
    phrase into a single token starting at index 0."""
    text = "Orða- og tengingasetning."

    # Raw parse: "Orða", "-", "og", "tengingasetning", ".".
    chars, byte_ix = tokenizer.calculate_indexes(tokenizer.parse_tokens(text))
    assert chars == [0, 4, 5, 8, 24]
    assert byte_ix == [0, 5, 6, 9, 25]
    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.parse_tokens(text), last_is_end=True
    )
    assert chars == [0, 4, 5, 8, 24, 25]
    assert byte_ix == [0, 5, 6, 9, 25, 26]

    # tokenize() squishes the whole phrase into one token.
    chars, byte_ix = tokenizer.calculate_indexes(tokenizer.tokenize(text))
    assert chars == [0, 24]
    assert byte_ix == [0, 25]
    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.tokenize(text), last_is_end=True
    )
    assert chars == [0, 24, 25]
    assert byte_ix == [0, 25, 26]
def test_lengthening_substitutions() -> None:
    """Indexes must keep referring to the ORIGINAL text even when a
    kludgy ordinal ("3ji") is rewritten to a longer form during parsing
    (KLUDGY_ORDINALS_MODIFY turns "3ji" into "þriðji")."""
    text = "Þetta er 3ji báturinn!"
    opts = {"handle_kludgy_ordinals": tokenizer.KLUDGY_ORDINALS_MODIFY}

    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.parse_tokens(text, **opts)
    )
    assert chars == [0, 5, 8, 12, 21]
    assert byte_ix == [0, 6, 9, 13, 23]

    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.parse_tokens(text, **opts), last_is_end=True
    )
    assert chars == [0, 5, 8, 12, 21, 22]
    assert byte_ix == [0, 6, 9, 13, 23, 24]
def test_converted_measurements() -> None:
    """Indexes must point into the ORIGINAL text even when tokenize()
    normalizes measurements ("12° C") via convert_measurements=True."""
    text = "Stillið ofninn á 12° C til að baka kökuna."
    expected_chars = [0, 7, 14, 16, 22, 26, 29, 34, 41]
    expected_bytes = [0, 8, 15, 18, 25, 29, 33, 38, 46]

    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.tokenize(text, convert_measurements=True)
    )
    assert chars == expected_chars
    assert byte_ix == expected_bytes

    chars, byte_ix = tokenizer.calculate_indexes(
        tokenizer.tokenize(text, convert_measurements=True), last_is_end=True
    )
    assert chars == expected_chars + [42]
    assert byte_ix == expected_bytes + [47]
def test_compound() -> None:
    """The ``original`` spans of all tokens must concatenate back to
    exactly the input text, including the whitespace inside a compound
    ("iðnaðar- ... og ... viðskiptaráðherra").

    NOTE(review): the literal below may have lost runs of spaces in
    transit; the assertion is whitespace-exact, so verify the string
    against the upstream repository.
    """
    s = " Katrín Júlíusdóttir var iðnaðar- \n\t og \t\t viðskiptaráðherra"
    tlist = list(tokenizer.tokenize(s))
    # The sum of all original spans must cover the input exactly.
    assert sum(len(t.original or "") for t in tlist) == len(s)
| 40.609524
| 141
| 0.59742
| 2,720
| 21,320
| 4.491176
| 0.120221
| 0.126965
| 0.085953
| 0.126064
| 0.793549
| 0.782253
| 0.767436
| 0.767272
| 0.750491
| 0.727243
| 0
| 0.085617
| 0.284522
| 21,320
| 524
| 142
| 40.687023
| 0.714763
| 0.175375
| 0
| 0.747774
| 0
| 0
| 0.031752
| 0.002416
| 0
| 0
| 0
| 0
| 0.418398
| 1
| 0.026706
| false
| 0
| 0.002967
| 0
| 0.029674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b5db7ed44c3ed53a93c41b43f259a5e9de1bd30
| 2,614
|
py
|
Python
|
tests/test_compound_doc.py
|
pbutler/jsg
|
de2dc80776093411069f86930abc1a19204c3f32
|
[
"MIT"
] | null | null | null |
tests/test_compound_doc.py
|
pbutler/jsg
|
de2dc80776093411069f86930abc1a19204c3f32
|
[
"MIT"
] | null | null | null |
tests/test_compound_doc.py
|
pbutler/jsg
|
de2dc80776093411069f86930abc1a19204c3f32
|
[
"MIT"
] | null | null | null |
from jsg import Document, CompoundDocument
from jsg.fields import StringField
from utils import check_schema, schema
def test_compound_doc(schema):
    """A CompoundDocument with ``one_of`` renders as a JSON-schema
    ``oneOf`` referencing the member document definitions."""
    @schema.add()
    class A(Document):
        a = StringField()

    @schema.add()
    class B(Document):
        b = StringField()

    @schema.add()
    class C(CompoundDocument):
        one_of = ["A", "B"]

    # Expected draft-04 schema, built from shared fragments for clarity.
    member_refs = [
        {'$ref': '#/definitions/A'},
        {'$ref': '#/definitions/B'},
    ]
    expected = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'definitions': {
            'A': {'type': 'object', 'properties': {'a': {'type': 'string'}}},
            'B': {'type': 'object', 'properties': {'b': {'type': 'string'}}},
            'C': {'oneOf': member_refs},
        },
        '$ref': '#/definitions/C',
    }
    check_schema(schema, "C", expected)
def test_compound_doc_two_types(schema):
    """A CompoundDocument declaring BOTH ``one_of`` and ``any_of``
    renders both ``oneOf`` and ``anyOf`` keys with the same refs."""
    @schema.add()
    class A(Document):
        a = StringField()

    @schema.add()
    class B(Document):
        b = StringField()

    @schema.add()
    class C(CompoundDocument):
        one_of = ["A", "B"]
        any_of = ["A", "B"]

    member_refs = [
        {'$ref': '#/definitions/A'},
        {'$ref': '#/definitions/B'},
    ]
    expected = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'definitions': {
            'A': {'type': 'object', 'properties': {'a': {'type': 'string'}}},
            'B': {'type': 'object', 'properties': {'b': {'type': 'string'}}},
            'C': {'anyOf': member_refs, 'oneOf': member_refs},
        },
        '$ref': '#/definitions/C',
    }
    check_schema(schema, "C", expected)
| 23.763636
| 61
| 0.300306
| 161
| 2,614
| 4.801242
| 0.229814
| 0.14489
| 0.108668
| 0.129366
| 0.804657
| 0.804657
| 0.765847
| 0.765847
| 0.765847
| 0.765847
| 0
| 0.003413
| 0.551645
| 2,614
| 109
| 62
| 23.981651
| 0.656143
| 0
| 0
| 0.604167
| 0
| 0
| 0.160413
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.03125
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ba9692648d095439d162e6a3ef32c1b02cd1523
| 5,408
|
py
|
Python
|
regularTour/forms.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | 1
|
2021-09-09T02:34:59.000Z
|
2021-09-09T02:34:59.000Z
|
regularTour/forms.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | null | null | null |
regularTour/forms.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | null | null | null |
from django import forms
from regularTour import models
class CreateRegularTourForm(forms.Form):
    """Form for creating a regular tour.

    All fields are free-text, required, and rendered as Bootstrap
    ('form-control') text inputs (description uses a textarea).
    """
    # Short destination name, e.g. "China Town".
    destination = forms.CharField(
        label = 'Destination',
        required = True,
        max_length = 15,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'China Town'
        })
    )
    # Longer free-text description of the tour.
    description = forms.CharField(
        label = 'Description',
        required = True,
        max_length = 300,
        widget = forms.Textarea(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Write the description here'
        })
    )
    # Human-readable distance, e.g. "Approx. 3 kms".
    distance = forms.CharField(
        label = 'Distance',
        required = True,
        max_length = 5,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Approx. 3 kms'
        })
    )
    # Human-readable duration, e.g. "2 - 3 hours".
    duration = forms.CharField(
        label = 'Duration',
        required = True,
        max_length = 10,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': '2 - 3 hours'
        })
    )
    # Free-text meeting point / address.
    meeting_point = forms.CharField(
        label = 'Meeting Point',
        required = True,
        max_length = 150,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'In the area of Candra Naya building, Novotel Hotel Gajah Mada. Jl. Gajah Mada No. 188, Jakarta Barat.'
        })
    )
    # Stops along the route, separated by " - ".
    route = forms.CharField(
        label = 'Route',
        required = True,
        max_length = 100,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Candra Naya - Pantjoran Tea House - Petak Sembilan - Dharma Bakti Temple - St. Maria de Fatima Church - Toa Se Bio'
        })
    )
    # URL of an externally hosted photo (no max_length).
    photo = forms.CharField(
        label = 'Photo',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Upload to imgbb.com then copy the link here'
        })
    )
    # Free-text map location (no max_length).
    location_map = forms.CharField(
        label = 'Location Map',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Write map location here'
        })
    )
    # NOTE(review): ``Meta`` is only honoured by forms.ModelForm; a plain
    # forms.Form ignores it, so model/fields below have no effect.
    # Presumably this was meant to be a ModelForm — confirm.
    class Meta:
        model = models.RegularTourModel
        fields = (
            'destination',
            'description',
            'distance',
            'duration',
            'meeting_point',
            'route',
            'photo',
            'location_map'
        )
class UpdateRegularTourForm(forms.Form):
    """Form for editing an existing regular tour.

    Same fields as CreateRegularTourForm, but without the max_length
    limits and with shorter placeholders.
    """
    destination = forms.CharField(
        label = 'Destination',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'China Town'
        })
    )
    description = forms.CharField(
        label = 'Description',
        required = True,
        widget = forms.Textarea(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Write the description here'
        })
    )
    distance = forms.CharField(
        label = 'Distance',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'kms'
        })
    )
    duration = forms.CharField(
        label = 'Duration',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'hours'
        })
    )
    meeting_point = forms.CharField(
        label = 'Meeting Point',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'In the area of Candra Naya building, Novotel Hotel Gajah Mada. Jl. Gajah Mada No. 188, Jakarta Barat.'
        })
    )
    route = forms.CharField(
        label = 'Route',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Candra Naya - Pantjoran Tea House - Petak Sembilan - Dharma Bakti Temple - St. Maria de Fatima Church - Toa Se Bio'
        })
    )
    # Earlier ImageField variant kept for reference:
    # photo = forms.ImageField(
    # label = 'Photo',
    # required = True,
    # help_text = 'Upload photo destination here'
    # )
    photo = forms.CharField(
        label = 'Photo',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': '.jpg'
        })
    )
    location_map = forms.CharField(
        label = 'Location Map',
        required = True,
        widget = forms.TextInput(attrs={
            'class': 'form-control',
            'type': 'text',
            'placeholder': 'Write map location here'
        })
    )
    # NOTE(review): ``Meta`` is ignored by plain forms.Form (only
    # ModelForm reads it) — presumably intended as a ModelForm; confirm.
    class Meta:
        model = models.RegularTourModel
        fields = (
            'destination',
            'description',
            'distance',
            'duration',
            'meeting_point',
            'route',
            'photo',
            'location_map'
        )
| 26.640394
| 143
| 0.505917
| 469
| 5,408
| 5.801706
| 0.202559
| 0.074972
| 0.111724
| 0.123484
| 0.896729
| 0.896729
| 0.896729
| 0.896729
| 0.864388
| 0.823227
| 0
| 0.006725
| 0.367604
| 5,408
| 202
| 144
| 26.772277
| 0.788889
| 0.021635
| 0
| 0.808989
| 0
| 0.022472
| 0.282876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011236
| 0
| 0.146067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7fcd83d9bd596a83c7b481e02613a9ce02258c3
| 93
|
py
|
Python
|
scripts/cloud_functions/login.py
|
themichaelusa/poke
|
c466084e77017c8f0c143662e1b1d19db7e55bc6
|
[
"Apache-2.0"
] | 3
|
2019-10-26T17:16:28.000Z
|
2020-03-02T17:21:34.000Z
|
scripts/cloud_functions/login.py
|
themichaelusa/poke
|
c466084e77017c8f0c143662e1b1d19db7e55bc6
|
[
"Apache-2.0"
] | 3
|
2020-02-29T17:26:51.000Z
|
2020-03-01T01:56:37.000Z
|
scripts/cloud_functions/login.py
|
themichaelusa/poke
|
c466084e77017c8f0c143662e1b1d19db7e55bc6
|
[
"Apache-2.0"
] | 1
|
2020-02-28T22:24:04.000Z
|
2020-02-28T22:24:04.000Z
|
from backend.user_auth import *
def login(uname, pwd):
    """Cloud-function entry point for user login.

    Delegates straight to the backend's ``login_internal`` and returns
    its result unchanged.
    """
    auth_result = login_internal(uname, pwd)
    return auth_result
| 23.25
| 37
| 0.752688
| 14
| 93
| 4.857143
| 0.785714
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 4
| 37
| 23.25
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
f023b57955cbf5ba24a0009c7e42ded7c43dc15b
| 5,556
|
py
|
Python
|
game_obj.py
|
Raymond4578/Atipycal-Invasion
|
fd1fe9b3ff31d8cfe202acb411c9cb4afd5aa401
|
[
"MIT"
] | 1
|
2021-02-08T12:50:35.000Z
|
2021-02-08T12:50:35.000Z
|
game_obj.py
|
Raymond4578/Atipycal-Invasion
|
fd1fe9b3ff31d8cfe202acb411c9cb4afd5aa401
|
[
"MIT"
] | null | null | null |
game_obj.py
|
Raymond4578/Atipycal-Invasion
|
fd1fe9b3ff31d8cfe202acb411c9cb4afd5aa401
|
[
"MIT"
] | null | null | null |
import random
class game_obj(object):
    """Base class for every on-screen game object: a 2-D position plus
    the relative path of the sprite that draws it."""

    def __init__(self, x, y, pic_path):
        """Store the initial position and sprite path.

        :param int x: initial x position of the object
        :param int y: initial y position of the object
        :param str pic_path: relative path of the object's picture
        """
        self.x, self.y = x, y
        self.pic_path = pic_path

    def change_status(self):
        """Advance the object by one frame; subclasses must override."""
        pass
class player(game_obj):
    """The user-controlled object: velocity changes with WASD-style input
    and the position advances by the current velocity each frame."""

    def __init__(self, x, y, pic_path, acc, vel_x, vel_y, dir):
        """
        :param int x: initial x position of player
        :param int y: initial y position of player
        :param str pic_path: relative path of object picture
        :param int acc: acceleration magnitude (no direction involved)
        :param int vel_x: speed of player along the x axis
        :param int vel_y: speed of player along the y axis
        :param str dir: the direction of player given as input
        """
        self.acc = acc
        self.vel_x = vel_x
        self.vel_y = vel_y
        self.dir = dir
        super().__init__(x, y, pic_path)
        # BUGFIX: this used to be ``self.is_alive = True``, which shadowed
        # the is_alive() method below and made it uncallable on instances.
        self.alive = True

    def change_status(self, dir):
        """Determine the attributes of player on the next frame.

        :param str dir: input direction; None -> no input,
            'a' -> left, 'd' -> right, 'w' -> up, 's' -> down.
            Any other value leaves the player unchanged (as before).
        """
        # BUGFIX: the original read bare names ``vel_x``/``vel_y``/``acc``
        # (NameError at runtime); they are instance attributes.
        if dir is None:
            # No input given: continue in the previous direction.
            self.x += self.vel_x
            self.y += self.vel_y
        elif dir == 'a':
            # Input of left given.
            self.vel_x -= self.acc
            self.x += self.vel_x
            self.y += self.vel_y
        elif dir == 'd':
            # Input of right given.
            self.vel_x += self.acc
            self.x += self.vel_x
            self.y += self.vel_y
        elif dir == 'w':
            # Input of up given.
            self.vel_y += self.acc
            self.x += self.vel_x
            self.y += self.vel_y
        elif dir == 's':
            # Input of down given.
            self.vel_y -= self.acc
            self.x += self.vel_x
            self.y += self.vel_y

    def is_alive(self):
        """
        :return: whether the player is alive
        :rtype: bool
        """
        return self.alive

    def collide(self, oth_game_obj):
        """
        :return: whether the player collided with ``oth_game_obj``
        :rtype: bool

        Not implemented yet.
        """
        pass
class enemy(game_obj):
    """An autonomous object that picks a random direction every frame."""

    def __init__(self, x, y, pic_path, acc, vel_x, vel_y, dir):
        """
        :param int x: initial x position of enemy
        :param int y: initial y position of enemy
        :param str pic_path: relative path of object picture
        :param int acc: acceleration magnitude (no direction involved)
        :param int vel_x: speed of enemy along the x axis
        :param int vel_y: speed of enemy along the y axis
        :param str dir: the direction of enemy given as input
        """
        self.acc = acc
        self.vel_x = vel_x
        self.vel_y = vel_y
        self.dir = dir
        super().__init__(x, y, pic_path)
        # BUGFIX: was ``self.is_alive = True``, which shadowed the
        # is_alive() method below and made it uncallable on instances.
        self.alive = True

    def change_status(self):
        """Advance the enemy one frame in a randomly chosen direction.

        The direction is drawn uniformly from
        'a' -> left, 'd' -> right, 'w' -> up, 's' -> down.
        """
        dir = random.choice(['a', 'd', 'w', 's'])
        # BUGFIX: acceleration/velocity were read as bare (undefined)
        # names ``acc``/``vel_x``/``vel_y``; they are instance attributes.
        if dir == 'a':
            self.vel_x -= self.acc
        elif dir == 'd':
            self.vel_x += self.acc
        elif dir == 'w':
            self.vel_y += self.acc
        elif dir == 's':
            self.vel_y -= self.acc
        # Every branch of the original advanced the position after the
        # velocity update, so the movement is hoisted out of the chain.
        self.x += self.vel_x
        self.y += self.vel_y

    def is_alive(self):
        """
        :return: whether the enemy is alive
        :rtype: bool
        """
        return self.alive

    def collide(self, oth_game_obj):
        """
        :return: whether the enemy collided with ``oth_game_obj``
        :rtype: bool

        Not implemented yet.
        """
        pass
class tower(game_obj):
    """A stationary tower: position plus sprite only, no behaviour of
    its own beyond what game_obj provides."""
    def __init__(self, x, y, pic_path):
        """
        :param int x: initial x position of tower
        :param int y: initial y position of tower
        :param str pic_path: relative path of object picture
        """
        super().__init__(x, y, pic_path)
class picture(game_obj):
    """A static decorative image: position plus sprite only, delegating
    everything to game_obj."""
    def __init__(self, x, y, pic_path):
        """
        :param int x: initial x position of picture
        :param int y: initial y position of picture
        :param str pic_path: relative path of object picture
        """
        super().__init__(x, y, pic_path)
class text(game_obj):
    """A positioned piece of text.

    NOTE(review): the text content is stored in the base class's
    ``pic_path`` slot — confirm that renderers expect that.
    """
    def __init__(self, x, y, info):
        """
        :param int x: initial x position of the text
        :param int y: initial y position of the text
        :param str info: the text content to display
        """
        # BUGFIX: the original passed the undefined name ``font_path``
        # (NameError on construction); the parameter is ``info``.
        super().__init__(x, y, info)
| 27.369458
| 94
| 0.5045
| 713
| 5,556
| 3.758766
| 0.123422
| 0.031343
| 0.020522
| 0.03694
| 0.847388
| 0.839179
| 0.839179
| 0.778358
| 0.764179
| 0.686194
| 0
| 0
| 0.406947
| 5,556
| 203
| 95
| 27.369458
| 0.813354
| 0.394708
| 0
| 0.78481
| 0
| 0
| 0.004662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164557
| false
| 0.037975
| 0.012658
| 0
| 0.278481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f084264e13202e50627937dd1211d4ba29bbfadc
| 15,737
|
py
|
Python
|
test/test_bit_stream.py
|
bcarr092/CSignal
|
056761e4997091d2907468d17fb82607a56d3bf6
|
[
"Apache-2.0"
] | 1
|
2015-11-14T01:18:27.000Z
|
2015-11-14T01:18:27.000Z
|
test/test_bit_stream.py
|
bcarr092/CSignal
|
056761e4997091d2907468d17fb82607a56d3bf6
|
[
"Apache-2.0"
] | null | null | null |
test/test_bit_stream.py
|
bcarr092/CSignal
|
056761e4997091d2907468d17fb82607a56d3bf6
|
[
"Apache-2.0"
] | null | null | null |
from csignal_tests import *
from array import array
import unittest
import struct
import socket
import random
import string
import sys
class TestsBitStream( unittest.TestCase ):
def test_bit_stream_peak( self ):
    """python_bit_stream_peak must expose the packer-backed buffer
    without consuming it, then reflect the bit offsets after a 1-bit
    add/read.

    NOTE(review): Python 2 code — ``data`` is a str built by
    concatenating struct.pack() output; under Python 3 ``'' + bytes``
    would raise TypeError.
    """
    data = ''
    # Build 4000 bytes: 1000 random unsigned 32-bit ints packed with "I".
    for index in range( 1000 ):
        initialNumber = random.randint( 0, 2 ** 32 - 1 )
        data = data + struct.pack( "I", int( initialNumber ) )
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    bitStream = python_bit_stream_initialize_from_bit_packer( False, bitPacker )
    self.assertNotEquals( bitStream, None )
    # Feed the packer four bytes at a time.
    # NOTE(review): the local ``string`` shadows the imported string module.
    for index in range( 0, len( data ), 4 ):
        string = data[ index : index + 4 ]
        self.assertEquals (
            python_bit_packer_add_bytes( string, bitPacker ),
            CPC_ERROR_CODE_NO_ERROR
        )
    # First peak: offsets are 0 and the buffer mirrors everything added.
    ( readOffset, writeOffset, buffer ) = python_bit_stream_peak( bitStream )
    self.assertEquals( 0, readOffset )
    self.assertEquals( 0, writeOffset )
    self.assertEquals( len( data ), len( buffer ) )
    for index in range( len( data ) ):
        byteOriginal = data[ index ]
        byteBitStream = buffer[ index ]
        self.assertEquals( byteOriginal, byteBitStream )
    # Second peak is identical: peaking must not consume anything.
    ( readOffset, writeOffset, buffer ) = python_bit_stream_peak( bitStream )
    self.assertEquals( 0, readOffset )
    self.assertEquals( 0, writeOffset )
    self.assertEquals( len( data ), len( buffer ) )
    # Add a single 1-bit and read one bit back: both offsets advance to
    # 1 (bits) and the buffer grows by one byte holding 0x80
    # (the bit sits in the most significant position).
    bitValue = 1
    self.assertEquals( CPC_ERROR_CODE_NO_ERROR, bit_packer_add_bits( bitValue, 1, bitPacker ) )
    self.assertNotEquals( None, python_bit_stream_get_bits( bitStream, 1 ) )
    ( readOffset, writeOffset, buffer ) = python_bit_stream_peak( bitStream )
    self.assertEquals( 1, readOffset )
    self.assertEquals( 1, writeOffset )
    self.assertEquals( len( data ) + 1, len( buffer ) )
    self.assertEquals( 0x80, ord( buffer[ len( buffer ) - 1 ] ) )
    # Release the native stream and packer.
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
def test_bit_stream_peak_negative( self ):
    """Peeking a NULL stream fails; peeking an empty stream yields no bytes."""
    # A NULL stream handle is rejected outright.
    self.assertEquals( None, python_bit_stream_peak( None ) )
    packer = python_bit_packer_initialize()
    self.assertNotEquals( None, packer )
    stream = python_bit_stream_initialize_from_bit_packer( False, packer )
    self.assertNotEquals( None, stream )
    # An empty stream peeks as zero offsets and a zero-length buffer.
    readOffset, writeOffset, contents = python_bit_stream_peak( stream )
    self.assertEquals( 0, readOffset )
    self.assertEquals( 0, writeOffset )
    self.assertNotEquals( None, contents )
    self.assertEquals( 0, len( contents ) )
    # Tear down in stream-then-packer order.
    self.assertEquals( CPC_ERROR_CODE_NO_ERROR, bit_stream_destroy( stream ) )
    self.assertEquals( CPC_ERROR_CODE_NO_ERROR, bit_packer_destroy( packer ) )
def test_bit_stream_add_read( self ):
    """Interleave packing 32-bit words with reading each straight back out."""
    data = ''
    for index in range( 1000 ):
        initialNumber = random.randint( 0, 2 ** 32 - 1 )
        data = data + struct.pack( "I", int( initialNumber ) )
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    bitStream = python_bit_stream_initialize_from_bit_packer( False, bitPacker )
    self.assertNotEquals( bitStream, None )
    for index in range( 0, len( data ), 4 ):
        string = data[ index : index + 4 ]
        value = struct.unpack( "I", string )[ 0 ]
        # Write one word into the packer ...
        self.assertEquals (
            python_bit_packer_add_bytes( string, bitPacker ),
            CPC_ERROR_CODE_NO_ERROR
        )
        # ... then immediately read 32 bits back through the stream.
        result = python_bit_stream_get_bits( bitStream, 32 )
        self.assertNotEquals( result, None )
        ( numberOfBits, buffer ) = result
        self.assertEquals( numberOfBits, 32 )
        self.assertEquals( len( buffer ), 4 )
        retrievedValue = struct.unpack( "I", buffer )[ 0 ]
        self.assertEquals( value, retrievedValue )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
def test_bit_stream_get_bits_int_random( self ):
    """Round-trip random 32-bit integers through non-circular bit streams."""
    # Phase 1: 100 single-word streams.
    for index in range( 100 ):
        initialNumber = random.randint( 0, 2 ** 32 - 1 )
        data = struct.pack( "I", int( initialNumber ) )
        bitStream = python_bit_stream_initialize( False, data )
        self.assertNotEquals( bitStream, None )
        result = python_bit_stream_get_bits( bitStream, 32 )
        self.assertNotEquals( result, None )
        ( numberOfBits, buffer ) = result
        self.assertNotEquals( numberOfBits, None )
        self.assertNotEquals( buffer, None )
        self.assertEquals( numberOfBits, 32 )
        self.assertEquals( len( buffer ), 4 )
        retrievedNumber = struct.unpack( "I", buffer )[ 0 ]
        self.assertEquals( initialNumber, retrievedNumber )
        self.assertEquals (
            bit_stream_destroy( bitStream ),
            CPC_ERROR_CODE_NO_ERROR
        )
    # Phase 2: 100 streams of 1000 words each, read back in order.
    for count in range( 100 ):
        data = ''
        for _ in range( 1000 ):
            initialNumber = random.randint( 0, 2 ** 32 - 1 )
            data = data + struct.pack( "I", int( initialNumber ) )
        bitStream = python_bit_stream_initialize( False, data )
        self.assertNotEquals( bitStream, None )
        for index in range( 1000 ):
            baseIndex = index * 4
            initialNumber = struct.unpack( "I", data[ baseIndex : baseIndex + 4 ] )[ 0 ]
            result = python_bit_stream_get_bits( bitStream, 32 )
            self.assertNotEquals( result, None )
            ( numberOfBits, buffer ) = result
            self.assertNotEquals( numberOfBits, None )
            self.assertNotEquals( buffer, None )
            self.assertEquals( numberOfBits, 32 )
            self.assertEquals( len( buffer ), 4 )
            retrievedNumber = struct.unpack( "I", buffer )[ 0 ]
            self.assertEquals( initialNumber, retrievedNumber )
        self.assertEquals (
            bit_stream_destroy( bitStream ),
            CPC_ERROR_CODE_NO_ERROR
        )
def test_bit_stream_get_bits_int_basic( self ):
    """A single packed integer can be read back as one 32-bit chunk."""
    expected = 100
    packed = struct.pack( "I", int( expected ) )
    stream = python_bit_stream_initialize( False, packed )
    self.assertNotEquals( stream, None )
    outcome = python_bit_stream_get_bits( stream, 32 )
    self.assertNotEquals( outcome, None )
    bitCount, chunk = outcome
    self.assertNotEquals( bitCount, None )
    self.assertNotEquals( chunk, None )
    self.assertEquals( bitCount, 32 )
    self.assertEquals( len( chunk ), 4 )
    # Decoding the returned bytes must reproduce the original value.
    self.assertEquals( expected, struct.unpack( "I", chunk )[ 0 ] )
    self.assertEquals( bit_stream_destroy( stream ), CPC_ERROR_CODE_NO_ERROR )
def test_bit_stream_get_bits_random( self ):
    """Read random ASCII data back in chunks of 1, 2, 4 and 8 bits."""
    data = ''.join( random.choice( string.ascii_lowercase ) for _ in range( 1000 ) )
    for numBits in [ 1, 2, 4, 8 ]:
        bitStream = python_bit_stream_initialize( False, data )
        self.assertNotEquals( bitStream, None )
        for index in range( 0, len( data ) * 8, numBits ):
            result = python_bit_stream_get_bits( bitStream, numBits )
            self.assertNotEquals( result, None )
            ( numberOfBits, buffer ) = result
            self.assertNotEquals( numberOfBits, None )
            self.assertNotEquals( buffer, None )
            self.assertEquals( numberOfBits, numBits )
            self.assertEquals( len( buffer ), 1 )
            # Returned bits are left-aligned in the byte; shift them down.
            byte = ( struct.unpack( "B", buffer )[ 0 ] >> ( 8 - numBits ) )
            # Align the corresponding bits of the source byte the same way.
            byteToCompare = \
                struct.unpack( "B", data[ int( index / 8 ) ] )[ 0 ] \
                >> ( ( 8 - numBits ) - ( index % 8 ) )
            # Mask of numBits low-order ones.
            mask = 0
            for maskBit in range( numBits ):
                mask |= ( 1 << maskBit )
            # XOR is zero iff the extracted bits equal the source bits.
            self.assertEquals( ( byte ^ ( byteToCompare & mask ) ), 0 )
        self.assertEquals (
            bit_stream_destroy( bitStream ),
            CPC_ERROR_CODE_NO_ERROR
        )
def test_circular_bit_stream_get_bits_basic( self ):
    """A circular stream over one byte repeats its bit pattern indefinitely."""
    # Test is: 0 1 2 3 ( 0b00011011 )
    data = "\x1B"
    bitStream = python_bit_stream_initialize( True, data )
    self.assertNotEquals( None, bitStream )
    # 1000 full wraps of the single byte, two bits at a time.
    for i in range( 1000 ):
        for j in range( 4 ):
            result = python_bit_stream_get_bits( bitStream, 2 )
            self.assertNotEquals( None, result )
            ( numberOfBits, buffer ) = result
            self.assertNotEquals( None, numberOfBits )
            self.assertNotEquals( None, buffer )
            self.assertEquals( numberOfBits, 2 )
            self.assertEquals( len( buffer ), 1 )
            # The two bits arrive in the top of the byte; shifting by 6
            # yields the sequence 0, 1, 2, 3.
            byte = struct.unpack( "B", buffer )[ 0 ] >> 6
            self.assertEquals( byte, j )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
def test_bit_stream_get_bits_basic( self ):
    """Read a known single byte back in chunks of 1, 2, 4 and 8 bits."""
    data = "\x12"
    for numBits in [ 1, 2, 4, 8 ]:
        bitStream = python_bit_stream_initialize( False, data )
        self.assertNotEquals( bitStream, None )
        for index in range( 0, len( data ) * 8, numBits ):
            result = python_bit_stream_get_bits( bitStream, numBits )
            self.assertNotEquals( result, None )
            ( numberOfBits, buffer ) = result
            self.assertNotEquals( numberOfBits, None )
            self.assertNotEquals( buffer, None )
            self.assertEquals( numberOfBits, numBits )
            self.assertEquals( len( buffer ), 1 )
            # Returned bits are left-aligned in the byte; shift them down.
            byte = ( struct.unpack( "B", buffer )[ 0 ] >> ( 8 - numBits ) )
            # Align the corresponding bits of the source byte the same way.
            byteToCompare = \
                struct.unpack( "B", data[ int( index / 8 ) ] )[ 0 ] \
                >> ( ( 8 - numBits ) - ( index % 8 ) )
            # Mask of numBits low-order ones.
            mask = 0
            for maskBit in range( numBits ):
                mask |= ( 1 << maskBit )
            # XOR is zero iff the extracted bits equal the source bits.
            self.assertEquals( ( byte ^ ( byteToCompare & mask ) ), 0 )
        self.assertEquals (
            bit_stream_destroy( bitStream ),
            CPC_ERROR_CODE_NO_ERROR
        )
def test_bit_stream_negative( self ):
    """Invalid construction arguments are rejected for both stream flavours."""
    for circular in ( False, True ):
        # Neither a missing buffer nor a non-string one is accepted.
        self.assertEquals( python_bit_stream_initialize( circular, None ), None )
        self.assertEquals( python_bit_stream_initialize( circular, 1 ), None )
        self.assertEquals( python_bit_stream_initialize_from_bit_packer( circular, None ), None )
    # Destroying a NULL stream must report an error.
    self.assertNotEquals( bit_stream_destroy( None ), CPC_ERROR_CODE_NO_ERROR )
def test_circular_initialize_from_bit_packer( self ):
    """A circular stream can wrap a packer holding data or nothing at all."""
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    data = "Hello"
    self.assertEquals (
        python_bit_packer_add_bytes (
            data,
            bitPacker
        ),
        CPC_ERROR_CODE_NO_ERROR
    )
    bitStream = python_bit_stream_initialize_from_bit_packer( True, bitPacker )
    self.assertNotEquals( bitStream, None )
    # NOTE(review): the packer is destroyed before the stream here — the
    # stream presumably copies or safely shares the buffer; confirm in C API.
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
    # Repeat with an empty packer: an empty payload is still valid.
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    data = ""
    self.assertEquals (
        python_bit_packer_add_bytes (
            data,
            bitPacker
        ),
        CPC_ERROR_CODE_NO_ERROR
    )
    bitStream = python_bit_stream_initialize_from_bit_packer( True, bitPacker )
    self.assertNotEquals( bitStream, None )
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
def test_initialize_from_bit_packer( self ):
    """A non-circular stream can wrap a packer holding data or nothing."""
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    data = "Hello"
    self.assertEquals (
        python_bit_packer_add_bytes (
            data,
            bitPacker
        ),
        CPC_ERROR_CODE_NO_ERROR
    )
    bitStream = python_bit_stream_initialize_from_bit_packer( False, bitPacker )
    self.assertNotEquals( bitStream, None )
    # NOTE(review): the packer is destroyed before the stream here — the
    # stream presumably copies or safely shares the buffer; confirm in C API.
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
    # Repeat with an empty packer: an empty payload is still valid.
    bitPacker = python_bit_packer_initialize()
    self.assertNotEquals( bitPacker, None )
    data = ""
    self.assertEquals (
        python_bit_packer_add_bytes (
            data,
            bitPacker
        ),
        CPC_ERROR_CODE_NO_ERROR
    )
    bitStream = python_bit_stream_initialize_from_bit_packer( False, bitPacker )
    self.assertNotEquals( bitStream, None )
    self.assertEquals (
        bit_packer_destroy( bitPacker ),
        CPC_ERROR_CODE_NO_ERROR
    )
    self.assertEquals (
        bit_stream_destroy( bitStream ),
        CPC_ERROR_CODE_NO_ERROR
    )
def test_initialize( self ):
    """Non-circular streams initialize from text, one word, and bulk data."""
    def roundTrip( payload ):
        # Construct and immediately destroy a stream over the payload.
        stream = python_bit_stream_initialize( False, payload )
        self.assertNotEquals( stream, None )
        self.assertEquals( bit_stream_destroy( stream ), CPC_ERROR_CODE_NO_ERROR )
    roundTrip( "Hello" )
    roundTrip( struct.pack( "I", 1722 ) )
    # 100 packed signed ints drawn from a scaled normal distribution.
    samples = ''
    for _ in range( 100 ):
        amplitude = 32767 * random.normalvariate( 0, 1 )
        samples = samples + struct.pack( "i", int( amplitude ) )
    roundTrip( samples )
def test_circular_initialize( self ):
    """Circular streams initialize from text, a packed word, and bulk data."""
    data = "Hello"
    bitStream = python_bit_stream_initialize( True, data )
    self.assertNotEquals( bitStream, None )
    self.assertEquals( bit_stream_destroy( bitStream ), CPC_ERROR_CODE_NO_ERROR )
    data = struct.pack( "I", 1722 )
    bitStream = python_bit_stream_initialize( True, data )
    self.assertNotEquals( bitStream, None )
    self.assertEquals( bit_stream_destroy( bitStream ), CPC_ERROR_CODE_NO_ERROR )
    # 100 packed signed ints drawn from a scaled normal distribution.
    data = ""
    for index in range( 100 ):
        value = 32767 * random.normalvariate( 0, 1 )
        value = struct.pack( "i", int( value ) )
        data = data + value
    bitStream = python_bit_stream_initialize( True, data )
    self.assertNotEquals( bitStream, None )
    self.assertEquals( bit_stream_destroy( bitStream ), CPC_ERROR_CODE_NO_ERROR )
def test_initialize_destroy( self ):
    """Both stream flavours construct and tear down over a one-byte buffer."""
    payload = "1"
    for circular in ( False, True ):
        stream = python_bit_stream_initialize( circular, payload )
        self.assertNotEquals( stream, None )
        self.assertEquals( bit_stream_destroy( stream ), CPC_ERROR_CODE_NO_ERROR )
if __name__ == '__main__':
    # Quiet the C library's logging before running the suite.
    cpc_log_set_log_level( CPC_LOG_LEVEL_ERROR )
    csignal_initialize()
    try:
        # exit=False so control returns here; the bare unittest.main() call
        # raised SystemExit, which made csignal_terminate() unreachable.
        unittest.main( exit=False )
    finally:
        # Always release the C library, even if the test run blows up.
        csignal_terminate()
| 27.853097
| 95
| 0.630171
| 1,676
| 15,737
| 5.629475
| 0.063842
| 0.130578
| 0.063593
| 0.053418
| 0.881929
| 0.866879
| 0.85045
| 0.83222
| 0.828511
| 0.786222
| 0
| 0.016746
| 0.282837
| 15,737
| 564
| 96
| 27.902482
| 0.819245
| 0.00197
| 0
| 0.733696
| 0
| 0
| 0.00363
| 0
| 0
| 0
| 0.000255
| 0
| 0.345109
| 1
| 0.038043
| false
| 0
| 0.021739
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
65204eb142fd3ae2388aa35a9355011980d45bcd
| 8,688
|
py
|
Python
|
livestock/override.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
livestock/override.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
livestock/override.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022, alantechnologies and contributors
# For license information, please see license.txt
import frappe
from frappe.utils import add_days, flt, get_datetime, get_time, get_url, nowtime, today
from erpnext.projects.doctype.project.project import Project
from frappe.model.document import Document
class HatcheryProject(Document):
    """Project override for hatchery/broiler costing (extends frappe Document)."""

    def onload(self):
        """Attach a per-activity-type hours summary and refresh costing on form load."""
        self.set_onload('activity_summary', frappe.db.sql('''select activity_type,
            sum(hours) as total_hours
            from `tabTimesheet Detail` where project=%s and docstatus < 2 group by activity_type
            order by total_hours desc''', self.name, as_dict=True))
        self.update_costing()
def update_costing(self):
    """Pull actuals from timesheets and expense claims, then refresh all derived totals."""
    from_time_sheet = frappe.db.sql("""select
        sum(costing_amount) as costing_amount,
        sum(billing_amount) as billing_amount,
        min(from_time) as start_date,
        max(to_time) as end_date,
        sum(hours) as time
        from `tabTimesheet Detail` where project = %s and docstatus = 1""", self.name, as_dict=1)[0]
    from_expense_claim = frappe.db.sql("""select
        sum(total_sanctioned_amount) as total_sanctioned_amount
        from `tabExpense Claim` where project = %s
        and docstatus = 1""", self.name, as_dict=1)[0]
    # NOTE(review): the SQL aggregates can be None when no submitted rows
    # exist; downstream consumers must tolerate that.
    self.actual_start_date = from_time_sheet.start_date
    self.actual_end_date = from_time_sheet.end_date
    self.total_costing_amount = from_time_sheet.costing_amount
    self.total_billable_amount = from_time_sheet.billing_amount
    self.actual_time = from_time_sheet.time
    self.total_expense_claim = from_expense_claim.total_sanctioned_amount
    self.update_purchase_costing()
    self.update_sales_amount()
    self.update_billed_amount()
    # Type-specific contributions before computing the margin.
    if self.project_type=='Hatchery':
        self.calculate_tranfer_amount()
    if self.project_type=='Broiler':
        self.packing_cost=0
        self.calculate_packing_cost()
    self.calculate_gross_margin()
def calculate_packing_cost(self):
    """Sum the cost of packing items consumed by submitted Manufacture stock
    entries linked to this project's 'Chicken Own Packing' record, storing
    the result in self.packing_cost.
    """
    # NOTE(review): the filter key 'Project' is capitalised — confirm it
    # matches the actual fieldname on 'Chicken Own Packing'.
    own_packing = frappe.db.get_value('Chicken Own Packing', {'Project': self.name}, ['name'])
    pkcost = 0
    if own_packing:
        stock = frappe.db.get_list("Stock Entry", filters={'chicken_own_packing': own_packing, 'stock_entry_type': "Manufacture", "docstatus": '1'}, fields=['name'])
        # The set of registered packing items does not change per entry;
        # fetch it once instead of re-querying inside the loop (the
        # original issued this query for every stock entry).
        usedpacitem = frappe.db.get_list('Packing Items', fields=['item'], pluck='item')
        for stitm in stock:
            itm = frappe.get_doc('Stock Entry', stitm.name)
            for pacitem in itm.items:
                if pacitem.item_code in usedpacitem:
                    pkcost += pacitem.amount
    self.packing_cost = pkcost
def calculate_tranfer_amount(self):
    """Compute self.total_transfer_amount from submitted Material Transfer
    stock entries that booked an additional cost on the hatchery's account."""
    amount=0
    account = frappe.db.get_value('Hatchery Settings', self.hatchery, 'account')
    # NOTE(review): the filter key 'Project' is capitalised — confirm it
    # matches the Stock Entry fieldname.
    accu=frappe.db.get_list("Stock Entry",filters={'Project': self.name,'stock_entry_type':"Material Transfer","docstatus":'1'},fields=['name'])
    for ac in accu:
        acc=frappe.get_doc('Stock Entry',ac.name)
        exp_amt=0
        base_amount=0
        is_add_cost=0
        for cost in acc.additional_costs:
            if cost.expense_account==account:
                exp_amt+=cost.amount
                is_add_cost=1
        # Only entries that carried a cost on the hatchery account contribute.
        if is_add_cost==1:
            for item in acc.items:
                base_amount+=item.basic_amount
            amount+=exp_amt+base_amount
    self.total_transfer_amount=amount
def calculate_gross_margin(self):
    """Derive gross margin (and its percentage) as income minus total costs.

    Hatchery projects additionally count internal transfer amounts as income.
    Fix: total_billed_amount / total_transfer_amount come from SQL sums and
    can be None; the original added them bare, raising TypeError. They are
    now wrapped in flt(), matching how the expense side already coerces.
    """
    # flt(None) == 0, so this also covers the original `or 0` fallback.
    packing_cost = flt(self.packing_cost)
    # Total expenses are the same for both project types.
    expense_amount = (flt(self.total_costing_amount) + flt(self.total_expense_claim)
        + packing_cost + flt(self.total_purchase_cost) + flt(self.get('total_consumed_material_cost', 0)))
    if self.project_type=='Hatchery':
        # Income = customer billing + internal transfers out of the hatchery.
        inc = flt(self.total_billed_amount) + flt(self.total_transfer_amount)
        self.gross_margin = inc - expense_amount
        if inc:
            self.per_gross_margin = (self.gross_margin / inc) * 100
    else:
        inc = flt(self.total_billed_amount)
        self.gross_margin = inc - expense_amount
        if inc:
            self.per_gross_margin = (self.gross_margin / inc) * 100
def update_purchase_costing(self):
    """Refresh self.total_purchase_cost from submitted Purchase Invoice Items."""
    rows = frappe.db.sql("""select sum(base_net_amount)
        from `tabPurchase Invoice Item` where project = %s and docstatus=1""", self.name)
    # An empty result set or a NULL aggregate both fall back to zero.
    self.total_purchase_cost = rows[0][0] if rows and rows[0][0] else 0
def update_sales_amount(self):
    """Refresh self.total_sales_amount from submitted Sales Orders."""
    rows = frappe.db.sql("""select sum(base_net_total)
        from `tabSales Order` where project = %s and docstatus=1""", self.name)
    # An empty result set or a NULL aggregate both fall back to zero.
    self.total_sales_amount = rows[0][0] if rows and rows[0][0] else 0
def update_billed_amount(self):
    """Refresh self.total_billed_amount from submitted Sales Invoices."""
    rows = frappe.db.sql("""select sum(base_net_total)
        from `tabSales Invoice` where project = %s and docstatus=1""", self.name)
    # An empty result set or a NULL aggregate both fall back to zero.
    self.total_billed_amount = rows[0][0] if rows and rows[0][0] else 0
def update_costing_from_trn(self,doc):
    """Refresh costing when a stock entry `doc` is submitted.

    Mirrors update_costing(), but derives the transfer/packing contribution
    from the submitted document instead of re-querying all stock entries.
    """
    from_time_sheet = frappe.db.sql("""select
        sum(costing_amount) as costing_amount,
        sum(billing_amount) as billing_amount,
        min(from_time) as start_date,
        max(to_time) as end_date,
        sum(hours) as time
        from `tabTimesheet Detail` where project = %s and docstatus = 1""", self.name, as_dict=1)[0]
    from_expense_claim = frappe.db.sql("""select
        sum(total_sanctioned_amount) as total_sanctioned_amount
        from `tabExpense Claim` where project = %s
        and docstatus = 1""", self.name, as_dict=1)[0]
    self.actual_start_date = from_time_sheet.start_date
    self.actual_end_date = from_time_sheet.end_date
    self.total_costing_amount = from_time_sheet.costing_amount
    self.total_billable_amount = from_time_sheet.billing_amount
    self.actual_time = from_time_sheet.time
    self.total_expense_claim = from_expense_claim.total_sanctioned_amount
    self.update_purchase_costing()
    self.update_sales_amount()
    self.update_billed_amount()
    if self.project_type=='Hatchery':
        # Accumulate this document's hatchery-account costs + base amounts.
        amount=0
        account = frappe.db.get_value('Hatchery Settings', self.hatchery, 'account')
        exp_amt=0
        base_amount=0
        is_add_cost=0
        for cost in doc.additional_costs:
            if cost.expense_account==account:
                exp_amt+=cost.amount
                is_add_cost=1
        if is_add_cost==1:
            for item in doc.items:
                base_amount+=item.basic_amount
            amount+=exp_amt+base_amount
        # NOTE(review): this assignment overwrites rather than increments the
        # running total (cancel_costing_from_trn subtracts) — confirm whether
        # `+=` was intended here.
        self.total_transfer_amount=amount
    if self.project_type=='Broiler':
        own_packing = frappe.db.get_value('Chicken Own Packing', {'Project':self.name}, ['name'])
        pkcost=0
        # Only count the document if it belongs to this project's packing record.
        if own_packing==doc.chicken_own_packing:
            usedpacitem=frappe.db.get_list('Packing Items',fields=['item'], pluck='item')
            for pacitem in doc.items:
                if pacitem.item_code in usedpacitem:
                    pkcost+=pacitem.amount
            self.packing_cost=pkcost
    self.calculate_gross_margin()
def cancel_costing_from_trn(self,doc):
    """Refresh costing when stock entry `doc` is cancelled.

    Mirrors update_costing_from_trn(), but subtracts the cancelled
    document's transfer/packing contribution from the running totals.
    """
    from_time_sheet = frappe.db.sql("""select
        sum(costing_amount) as costing_amount,
        sum(billing_amount) as billing_amount,
        min(from_time) as start_date,
        max(to_time) as end_date,
        sum(hours) as time
        from `tabTimesheet Detail` where project = %s and docstatus = 1""", self.name, as_dict=1)[0]
    from_expense_claim = frappe.db.sql("""select
        sum(total_sanctioned_amount) as total_sanctioned_amount
        from `tabExpense Claim` where project = %s
        and docstatus = 1""", self.name, as_dict=1)[0]
    self.actual_start_date = from_time_sheet.start_date
    self.actual_end_date = from_time_sheet.end_date
    self.total_costing_amount = from_time_sheet.costing_amount
    self.total_billable_amount = from_time_sheet.billing_amount
    self.actual_time = from_time_sheet.time
    self.total_expense_claim = from_expense_claim.total_sanctioned_amount
    self.update_purchase_costing()
    self.update_sales_amount()
    self.update_billed_amount()
    if self.project_type=='Hatchery':
        # Recompute the cancelled document's hatchery-account contribution.
        amount=0
        account = frappe.db.get_value('Hatchery Settings', self.hatchery, 'account')
        exp_amt=0
        base_amount=0
        is_add_cost=0
        for cost in doc.additional_costs:
            if cost.expense_account==account:
                exp_amt+=cost.amount
                is_add_cost=1
        if is_add_cost==1:
            for item in doc.items:
                base_amount+=item.basic_amount
            amount+=exp_amt+base_amount
        # Remove the cancelled contribution from the running total.
        self.total_transfer_amount=self.total_transfer_amount-amount
    if self.project_type=='Broiler':
        own_packing = frappe.db.get_value('Chicken Own Packing', {'Project':self.name}, ['name'])
        pkcost=0
        # Only subtract if the document belongs to this project's packing record.
        if own_packing==doc.chicken_own_packing:
            usedpacitem=frappe.db.get_list('Packing Items',fields=['item'], pluck='item')
            for pacitem in doc.items:
                if pacitem.item_code in usedpacitem:
                    pkcost+=pacitem.amount
            self.packing_cost=self.packing_cost-pkcost
    self.calculate_gross_margin()
| 35.753086
| 152
| 0.739871
| 1,285
| 8,688
| 4.71284
| 0.106615
| 0.044584
| 0.038639
| 0.028071
| 0.820343
| 0.787153
| 0.769485
| 0.747853
| 0.706902
| 0.706902
| 0
| 0.009245
| 0.15343
| 8,688
| 243
| 153
| 35.753086
| 0.81414
| 0.011625
| 0
| 0.716578
| 0
| 0
| 0.247874
| 0.024345
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053476
| false
| 0
| 0.02139
| 0
| 0.080214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
334fa0d10eed72b7e8717c8ed51e0378efc49ab6
| 103
|
py
|
Python
|
rpython/jit/backend/test/test_zll_stress_2.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/backend/test/test_zll_stress_2.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/backend/test/test_zll_stress_2.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from rpython.jit.backend.test import zll_stress
def test_stress_2():
    """Run the shared JIT-backend stress suite with variant 2."""
    zll_stress.do_test_stress(2)
| 20.6
| 47
| 0.796117
| 18
| 103
| 4.222222
| 0.611111
| 0.236842
| 0.289474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.116505
| 103
| 4
| 48
| 25.75
| 0.813187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
336a14618cc92c74396f30771a9c6d9269d2ce36
| 965
|
py
|
Python
|
tests/test_cli.py
|
blaisep/chaostoolkit
|
3fec59b58552042407a9f0957179c51124a183bc
|
[
"Apache-2.0"
] | 1
|
2020-02-18T03:31:14.000Z
|
2020-02-18T03:31:14.000Z
|
tests/test_cli.py
|
blaisep/chaostoolkit
|
3fec59b58552042407a9f0957179c51124a183bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli.py
|
blaisep/chaostoolkit
|
3fec59b58552042407a9f0957179c51124a183bc
|
[
"Apache-2.0"
] | 1
|
2019-02-16T11:03:30.000Z
|
2019-02-16T11:03:30.000Z
|
# -*- coding: utf-8 -*-
import click
from click.testing import CliRunner
from chaostoolkit.cli import cli
def test_run_without_path_shows_missing_argument():
    """`run` with no path must abort with usage exit code 2.

    Renamed: three tests in this module shared one name, so earlier
    definitions were shadowed and only the last was ever collected.
    """
    runner = CliRunner()
    result = runner.invoke(cli, ['run'])
    assert result.exit_code == 2
    assert result.exception
    assert 'Error: Missing argument "path".' in result.output
def test_run_with_nonexistent_path_fails_validation():
    """`run` with a path that does not exist must fail click's path check.

    Renamed: three tests in this module shared one name, so earlier
    definitions were shadowed and only the last was ever collected.
    """
    runner = CliRunner()
    result = runner.invoke(cli, ['run', 'invalid.jsn'])
    assert result.exit_code == 2
    assert result.exception
    assert 'Error: Invalid value for "path": Path '\
        '"invalid.jsn" does not exist.' in result.output
def test_path_to_experiment_description_is_mandatory():
    """`run` with a nonexistent path reports the invalid-path error."""
    # NOTE(review): near-duplicate of the previous test — only the asserted
    # message formatting differs.
    runner = CliRunner()
    result = runner.invoke(cli, ['run', 'invalid.jsn'])
    assert result.exit_code == 2
    assert result.exception
    assert 'Error: Invalid value for "path": Path "invalid.jsn" does not exist.' in result.output
| 31.129032
| 97
| 0.707772
| 126
| 965
| 5.253968
| 0.309524
| 0.108761
| 0.049849
| 0.058912
| 0.850453
| 0.850453
| 0.850453
| 0.850453
| 0.850453
| 0.850453
| 0
| 0.005057
| 0.180311
| 965
| 30
| 98
| 32.166667
| 0.831858
| 0.021762
| 0
| 0.636364
| 0
| 0
| 0.208068
| 0
| 0
| 0
| 0
| 0
| 0.409091
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
336d57e25fcbdec438a84d75a88af46f516654ce
| 5,988
|
py
|
Python
|
utils.py
|
MaelC001/SPI2022
|
ddcbb0b1e480fe0ba43e1dc90826a157a5f58828
|
[
"MIT"
] | null | null | null |
utils.py
|
MaelC001/SPI2022
|
ddcbb0b1e480fe0ba43e1dc90826a157a5f58828
|
[
"MIT"
] | null | null | null |
utils.py
|
MaelC001/SPI2022
|
ddcbb0b1e480fe0ba43e1dc90826a157a5f58828
|
[
"MIT"
] | 1
|
2022-03-22T17:21:05.000Z
|
2022-03-22T17:21:05.000Z
|
'''
@Author danielvallejo237
'''
import unidecode
import json
import re
import numpy as np
import regex
class RecetaVS:
    """Parse a 'VS'-format recipe from raw text: title, author, ingredients,
    preparation, and a keyword summary built from the ingredient lines."""
    def __init__(self,texto):
        self.texto=texto
        self.author=""
        self.ing=[]
        self.titulo=""
        self.preparacion=""
        # Lower-cased, accent-stripped copy used for all pattern matching.
        self.unaccented=unidecode.unidecode(self.texto.lower())
        self.resumen=""
        self.source=""
        # Only parse the fields if the text scores as a recipe.
        if self.isRecipie():
            self.gettitle()
            self.get_recipie()
            self.get_ingredients()
            self.resumen=self.getKwd()
    def isRecipie(self):
        "Check whether the text is a recipe based on the words it contains"
        score=0.0
        scores={}
        scores['elaboracion']=0.3
        scores['nthoki']=0.5
        scores['ingredientes']=0.1
        score+=len(re.findall(r'elaboracion:',self.unaccented))*scores['elaboracion']
        score+=len(re.findall(r'nthoki',self.unaccented))*scores['nthoki']
        score+=len(re.findall(r'ingredientes:',self.unaccented))*scores['ingredientes']
        # Remember whether an ingredients section exists for get_ingredients().
        self.hasIng=len(re.findall(r'ingredientes:',self.unaccented))*scores['ingredientes']>0
        if score>0.5:
            return True
        else:
            return False
    def gettitle(self):
        """Take the title from the first non-empty line; author from the third."""
        split=self.texto.split("\n")
        if split[0] !="":
            self.titulo=split[0]
        else:
            self.titulo=split[1]
        # NOTE(review): the author is assumed to sit on line 3 — confirm
        # against the source documents.
        self.author=split[2]
        self.titulo=unidecode.unidecode(self.titulo.lower())
        self.author=unidecode.unidecode(self.author.lower())
    def get_recipie(self):
        # The text between 'elaboracion' and 'nthoki' (either order) is the
        # preparation; newlines are collapsed to spaces.
        rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('elaboracion', 'nthoki'))
        self.preparacion=rgx.findall(self.unaccented)[0][0]
        self.preparacion=' '.join(self.preparacion.split('\n'))
    def get_ingredients(self):
        if self.hasIng:
            rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('ingredientes', 'elaboracion'))
            ingredientes=rgx.findall(self.unaccented)[0][0]
            splt=ingredientes.split('\n')
            # Drop the header line and the trailing partial line.
            splt=splt[1:-1]
            self.ing=splt
    def getKwd(self):
        """Build a keyword string: word characters only, digits excluded."""
        nstr=""
        if len(self.ing)>0:
            for s in self.ing:
                nstr=nstr+' '.join(re.findall(r'[^\d\W]+',s))+' '
        return unidecode.unidecode(nstr.lower())
    def add_source(self,source):
        # Record where this recipe came from (for serialization).
        self.source=source
    def toJSON(self):
        """Serialize the parsed recipe fields to a JSON string."""
        kw=self.getKwd()
        data_set = {"name": self.titulo, "author": self.author, "ing": self.ing, "prep":self.preparacion, "kwd":kw,"source":self.source}
        json_dump = json.dumps(data_set)
        return json_dump
class RecetaGeneral:
    """Parse a generic-format recipe from raw text: title, author,
    ingredients, preparation, and a keyword summary.

    Fixes over the original: the bare `except:` clauses (which also swallowed
    KeyboardInterrupt/SystemExit) now catch only the IndexError raised when
    the first delimiter pair finds no match; regex patterns are raw strings;
    commented-out debug code removed.
    """
    def __init__(self,texto):
        self.texto=texto
        self.author=""
        self.ing=[]
        self.titulo=""
        self.preparacion=""
        # Lower-cased, accent-stripped copy; '<pageend>' is appended as an
        # end-of-text sentinel for the section regexes.
        self.unaccented=unidecode.unidecode(self.texto.lower())+'<pageend>'
        self.resumen=""
        self.source=""
        if self.isRecipie():
            self.gettitle()
            self.get_recipie()
            self.get_ingredients()
            self.resumen=self.getKwd()
    def isRecipie(self):
        "Check whether the text is a recipe based on the words it contains"
        score=0.0
        scores={}
        scores['elaboracion']=0.3
        scores['preparacion']=0.3
        scores['ingredientes']=0.5
        score+=len(re.findall(r'elaboracion:',self.unaccented))*scores['elaboracion']
        score+=len(re.findall(r'preparacion:',self.unaccented))*scores['preparacion']
        score+=len(re.findall(r'ingredientes:',self.unaccented))*scores['ingredientes']
        # Remember whether an ingredients section exists for get_ingredients().
        self.hasIng=len(re.findall(r'ingredientes:',self.unaccented))*scores['ingredientes']>0
        if score>0.5:
            return True
        else:
            return False
    def gettitle(self):
        """Assemble the title from leading lines, then pick the author line."""
        split=self.texto.split("\n")
        indice=1
        if split[0] !="":
            self.titulo=split[0]
        else:
            self.titulo=split[0]+split[1]
            indice+=1
        # Keep appending lines until the leading word has 3+ characters.
        while(len(re.findall(r'\w+',self.titulo)[0])<3):
            self.titulo=re.findall(r'\w+',self.titulo)[0] + split[indice]
            indice+=1
        # A line mentioning 'dientes' is the ingredients header, not an author.
        if len(re.findall(r'dientes',split[indice].lower()))>0:
            self.author=""
        else:
            self.author=split[indice]
        self.titulo=unidecode.unidecode(self.titulo.lower())
        self.author=unidecode.unidecode(self.author.lower())
    def get_recipie(self):
        """Extract the preparation: try 'elaboracion', fall back to 'preparacion'."""
        try:
            rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('elaboracion', '<pageend>'))
            self.preparacion=rgx.findall(self.unaccented)[0][0]
        except IndexError:
            # No 'elaboracion' section was found; use 'preparacion' instead.
            rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('preparacion', '<pageend>'))
            self.preparacion=rgx.findall(self.unaccented)[0][0]
        self.preparacion=' '.join(self.preparacion.split('\n'))
    def get_ingredients(self):
        """Extract ingredient lines between 'ingredientes' and the next section."""
        if self.hasIng:
            try:
                rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('ingredientes', 'elaboracion'))
                ingredientes=rgx.findall(self.unaccented)[0][0]
            except IndexError:
                # No 'elaboracion' delimiter; cut at 'preparacion' instead.
                rgx = regex.compile(r'(?si)(?|{0}(.*?){1}|{1}(.*?){0})'.format('ingredientes', 'preparacion'))
                ingredientes=rgx.findall(self.unaccented)[0][0]
            splt=ingredientes.split('\n')
            # Drop the header line and the trailing partial line.
            splt=splt[1:-1]
            self.ing=splt
    def getKwd(self):
        """Build a keyword string: word characters only, digits excluded."""
        nstr=""
        if len(self.ing)>0:
            for s in self.ing:
                nstr=nstr+' '.join(re.findall(r'[^\d\W]+',s))+' '
        return unidecode.unidecode(nstr.lower())
    def add_source(self,source):
        # Record where this recipe came from (for serialization).
        self.source=source
    def toJSON(self):
        """Serialize the parsed recipe fields to a JSON string."""
        kw=self.getKwd()
        data_set = {"name": self.titulo, "author": self.author, "ing": self.ing, "prep":self.preparacion, "kwd":kw,"source":self.source}
        json_dump = json.dumps(data_set)
        return json_dump
| 37.660377
| 136
| 0.560454
| 711
| 5,988
| 4.683544
| 0.129395
| 0.048048
| 0.033033
| 0.035135
| 0.869069
| 0.869069
| 0.85015
| 0.85015
| 0.85015
| 0.841441
| 0
| 0.018808
| 0.263026
| 5,988
| 159
| 137
| 37.660377
| 0.735781
| 0.034068
| 0
| 0.816327
| 0
| 0
| 0.136556
| 0.032449
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108844
| false
| 0
| 0.034014
| 0
| 0.210884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
683035178dbdf6917b8b132b3c32123a542a760b
| 249
|
py
|
Python
|
backend/aprs/api/serializers.py
|
ruilvo/radioamadorismo-website
|
65c85a62cae367cbe235854a7c1a051fbc84b461
|
[
"Apache-2.0"
] | 7
|
2021-12-03T16:16:02.000Z
|
2021-12-16T18:13:10.000Z
|
backend/aprs/api/serializers.py
|
ruilvo/radioamadorismo-website
|
65c85a62cae367cbe235854a7c1a051fbc84b461
|
[
"Apache-2.0"
] | null | null | null |
backend/aprs/api/serializers.py
|
ruilvo/radioamadorismo-website
|
65c85a62cae367cbe235854a7c1a051fbc84b461
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
class CallsignSerializer(serializers.Serializer):
    """Validates a payload carrying a single `callsign` string (<= 200 chars)."""
    callsign = serializers.CharField(max_length=200)
class PasscodeSerializer(serializers.Serializer):
    """Validates a payload carrying a single `passcode` string (<= 200 chars)."""
    passcode = serializers.CharField(max_length=200)
| 24.9
| 52
| 0.823293
| 25
| 249
| 8.08
| 0.6
| 0.207921
| 0.227723
| 0.287129
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026906
| 0.104418
| 249
| 9
| 53
| 27.666667
| 0.878924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.4
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
683611a41b1d81d88568f95531fa780e865f92b2
| 6,980
|
py
|
Python
|
moveis/saguao.py
|
augusnunes/titanic-escape
|
0d856555a810870aff62c0d0378a26c2b4cf2fe1
|
[
"MIT"
] | null | null | null |
moveis/saguao.py
|
augusnunes/titanic-escape
|
0d856555a810870aff62c0d0378a26c2b4cf2fe1
|
[
"MIT"
] | null | null | null |
moveis/saguao.py
|
augusnunes/titanic-escape
|
0d856555a810870aff62c0d0378a26c2b4cf2fe1
|
[
"MIT"
] | null | null | null |
from superclasses.movel import Movel
import time
from copy import deepcopy
# Interativos
class Cofre(Movel):
    """A safe the player can try to open with a 5-digit code once power is on."""
    # itens_escondidos is the list of items moved to the inventory after opening
    def __init__(self, name, message, itens_escondidos):
        super().__init__(name, message)
        self.itens_escondidos = itens_escondidos
        # Command name -> help text shown by 'ajuda'.
        self.actions = {
            'ajuda': 'retorna as ações possíveis',
            'voltar': 'Parar de interagir com %s' % self.name,
            'abrir': 'Tentar abrir o cofre'
        }
    def play(self, inventory, inv2):
        """Interaction loop: prompt for commands until 'voltar' or 'abrir'."""
        # display message
        self.olhar()
        # Enter a nested loop to interact with this piece of furniture
        while True:
            try:
                action = input("\nInsira um comando para interagir com %s:\n>>>" % self.name)
                if action == 'ajuda':
                    for key, value in self.actions.items():
                        print("%s: %s" % (key, value))
                if action == 'abrir':
                    return self.abrir(inventory, inv2)
                if action == 'voltar':
                    break
            except Exception:
                # Any bad input just re-prompts.
                print('Interação inválida')
                continue
    def abrir(self, inventory, inventory2):
        """Attempt to open the safe; returns False on every outcome."""
        # The second inventory must contain an energy source
        if not inventory2.itens:
            print("\nEu preciso de energia pra abrir esse cofre...")
            return False
        # Already opened: the hidden items were handed over before
        elif self.itens_escondidos == []:
            print('Voce ja abriu esse cofre')
            return False
        if 'energia' not in [i.name for i in inventory2.itens]:
            print("\nNão liguei a energia ainda...")
            return False
        senha = str(input('Agora que tem energia, posso por a senha de 5 digitos: '))
        if senha == '77815':
            print("abrindo cofre...")
            # Spinner animation (~3 s) while "opening" the safe
            animation = "|/-\\"
            idx = 0
            while 1:
                if idx == 30:
                    break
                print(animation[idx % len(animation)], end="\r")
                idx += 1
                time.sleep(0.1)
            ########
            # Hand over copies of the hidden items and mark the safe emptied
            itens_a_passar = deepcopy(self.itens_escondidos)
            inventory.add(itens_a_passar)
            self.itens_escondidos = []
            print('Você pegou a chave do bote')
            return False
        else:
            print('Ih, nao eh essa senha nao...')
            return False
class Tv(Movel):
    """A television hiding items; taken apart with the 'ferramentas' item.

    ``itens_escondidos`` holds the items moved to the player's inventory
    after the TV is disassembled; emptied afterwards so it only works once.
    """

    def __init__(self, name, message, itens_escondidos):
        super().__init__(name, message)
        self.itens_escondidos = itens_escondidos
        # Maps each accepted command to the help text shown by 'ajuda'.
        self.actions = {
            'ajuda': 'retorna as ações possíveis',
            'voltar': 'Parar de interagir com %s' % self.name,
            'desmontar': 'Tentar desmontar a Televisão'
        }

    def play(self, inventory, inv2):
        """Interactive command loop; 'voltar' exits, 'desmontar' resolves."""
        # Show the object's description first.
        self.olhar()
        while True:
            try:
                action = input("\nInsira um comando para interagir com %s:\n>>>" % self.name)
                if action == 'ajuda':
                    for key, value in self.actions.items():
                        print("%s: %s" % (key, value))
                if action == 'desmontar':
                    return self.desmontar(inventory)
                if action == 'voltar':
                    break
            except Exception:
                # Deliberate catch-all: any bad interaction keeps the loop alive.
                print('Interação inválida')
                continue

    def desmontar(self, inventory):
        """Open the TV with the toolbox; move hidden items into *inventory*.

        Always returns False (game-loop convention for "keep playing").
        """
        # Empty inventory — cannot even try.
        if not inventory.itens:
            print("\nEu preciso de algo para abrir essa Tv...")
            return False
        # Toolbox not collected yet.
        elif 'ferramentas' not in [i.name for i in inventory.itens]:
            print("\nNão tenho uma caixa de ferramentas no meu inventário...")
            return False
        elif self.itens_escondidos == []:
            print('Voce ja abriu a televisão')
        else:
            print("Abrindo a Televisão...")
            animation = "|/-\\"
            # Spin for 30 frames; idiomatic for-loop replaces while/if/break.
            for idx in range(30):
                print(animation[idx % len(animation)], end="\r")
                time.sleep(0.1)
            # deepcopy so the transferred items cannot alias this object's list.
            itens_a_passar = deepcopy(self.itens_escondidos)
            inventory.add(itens_a_passar)
            self.itens_escondidos = []
            print('Um fusivel foi adicionado ao seu inventário')
        return False
class Radio(Movel):
    """A radio hiding items; taken apart with the 'ferramentas' item.

    ``itens_escondidos`` holds the items moved to the player's inventory
    after the radio is disassembled; emptied afterwards so it only works once.
    """

    def __init__(self, name, message, itens_escondidos):
        super().__init__(name, message)
        self.itens_escondidos = itens_escondidos
        # Maps each accepted command to the help text shown by 'ajuda'.
        self.actions = {
            'ajuda': 'retorna as ações possíveis',
            'voltar': 'Parar de interagir com %s' % self.name,
            'desmontar': 'Tentar desmontar o radio'
        }

    def play(self, inventory, inv2):
        """Interactive command loop; 'voltar' exits, 'desmontar' resolves."""
        # Show the object's description first.
        self.olhar()
        while True:
            try:
                action = input("\nInsira um comando para interagir com %s:\n>>>" % self.name)
                if action == 'ajuda':
                    for key, value in self.actions.items():
                        print("%s: %s" % (key, value))
                if action == 'desmontar':
                    return self.desmontar(inventory)
                if action == 'voltar':
                    break
            except Exception:
                # Deliberate catch-all: any bad interaction keeps the loop alive.
                print('Interação inválida')
                continue

    def desmontar(self, inventory):
        """Open the radio with the toolbox; move hidden items into *inventory*.

        Always returns False (game-loop convention for "keep playing").
        """
        # Empty inventory — cannot even try.
        if not inventory.itens:
            print("\nEu preciso de algo para abrir o rádio...")
            return False
        # Toolbox not collected yet.
        elif 'ferramentas' not in [i.name for i in inventory.itens]:
            print("\nNão tenho uma caixa de ferramentas no meu inventário...")
            return False
        elif self.itens_escondidos == []:
            print('Voce ja abriu esse aparelho')
        else:
            print("Abrindo o radio...")
            animation = "|/-\\"
            # Spin for 30 frames; idiomatic for-loop replaces while-1/break.
            for idx in range(30):
                print(animation[idx % len(animation)], end="\r")
                time.sleep(0.1)
            # deepcopy so the transferred items cannot alias this object's list.
            itens_a_passar = deepcopy(self.itens_escondidos)
            inventory.add(itens_a_passar)
            self.itens_escondidos = []
            print('Um fusivel foi adicionado ao seu inventário')
        return False
| 30.884956
| 93
| 0.519628
| 738
| 6,980
| 4.841463
| 0.201897
| 0.075567
| 0.063812
| 0.040302
| 0.84075
| 0.834593
| 0.834593
| 0.821439
| 0.821439
| 0.809404
| 0
| 0.007728
| 0.388252
| 6,980
| 226
| 94
| 30.884956
| 0.82904
| 0.096991
| 0
| 0.796053
| 0
| 0
| 0.192387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059211
| false
| 0.039474
| 0.019737
| 0
| 0.190789
| 0.164474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
686de4a0cc5e96db46a76ff8561d82fbbb66ecfa
| 101
|
py
|
Python
|
sprites/__init__.py
|
GpNico/MultiObjectDatasetCreator
|
bfe505e06c66a88da7bbe1530b9300ca47028200
|
[
"MIT"
] | 4
|
2020-01-06T08:50:04.000Z
|
2021-12-06T08:41:13.000Z
|
sprites/__init__.py
|
GpNico/MultiObjectDatasetCreator
|
bfe505e06c66a88da7bbe1530b9300ca47028200
|
[
"MIT"
] | 2
|
2021-06-08T20:48:25.000Z
|
2021-09-08T01:35:58.000Z
|
sprites/__init__.py
|
GpNico/MultiObjectDatasetCreator
|
bfe505e06c66a88da7bbe1530b9300ca47028200
|
[
"MIT"
] | 2
|
2020-11-19T14:20:29.000Z
|
2021-01-12T12:00:44.000Z
|
from .dsprites_binary_color import generate_dsprites
from .mnist_binary import generate_binary_mnist
| 33.666667
| 52
| 0.90099
| 14
| 101
| 6.071429
| 0.5
| 0.329412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 101
| 2
| 53
| 50.5
| 0.913978
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
68822b12d969f81140b2b7addde85f711e142801
| 3,319
|
py
|
Python
|
ddos.py
|
MR-ID19/PASS-DDOS
|
ef03ab589aa9b92e2b55895d75d16fa6528983fd
|
[
"Apache-2.0"
] | 5
|
2020-05-07T09:45:52.000Z
|
2021-02-17T04:21:58.000Z
|
ddos.py
|
MR-ID19/PASS-DDOS
|
ef03ab589aa9b92e2b55895d75d16fa6528983fd
|
[
"Apache-2.0"
] | 1
|
2020-07-13T03:31:22.000Z
|
2020-07-13T03:31:22.000Z
|
ddos.py
|
MR-ID19/PASS-DDOS
|
ef03ab589aa9b92e2b55895d75d16fa6528983fd
|
[
"Apache-2.0"
] | 1
|
2021-03-25T10:49:44.000Z
|
2021-03-25T10:49:44.000Z
|
#jangan di recode om :(
#susah bikin nya xixixi
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJyNV81zE8kV7xlJNpa/wViA+WhgAW/wSshmbTAsjowd22sjO5JcWgSOqqXplVofM2K6B9vEVKWK/QdyTC65JKnKJf9CDjnlnFxSlUsql9xyzyV5r2dGshH5GEk9r9+8z9973dOqkuCKwe/78JN/iRBiwdcgLUJKXdogJSOkTVIyQzpCSpGQjpJSNKRjpBQjlkk2NnmMNAaIFSEfDGJY0T5OrI8z0McZ7OOc6+MMAceK99jDPnsE2aM4jPSejfnPxklpkBz9gvBzxJpA1trBT0lpiPA4sSZxXhrGB6URwocJHyRNk7gPDev8x0723vyTWBfQxxThIDxELn1r9nm72ONM+5xEH+fSxxywXLQu97hXfLmZHueqz7lGSqMExzFiXSelcT1OED5JeIQ0zqOQdUNn8A/DolrgJgrYvzkrc0XLUJNfCJigdRmh6CocnVW4pRV+9CmFHuuKzwLtZWLdDrnXyeWE9RlJAHB3zvDuat49TBK4U8Sa1Ukef034xVBwVptsTBPrc2J9j3wwNX0fMQjouYAGYDhIJggf1UUdI0X7Loki7xJpxokbjxiGYX3Rc3dDu7MN8g3Ewy4THiUNQB4WxAyxkuQ7IK4SK6WJa8R6oInrxEqji4Q1TxJ8DO4LcId4x9Fawnroz3SjQXo1ULlBjiAlGjDXDp7q7gNnN4n1JeHXwaVRukX4LdK4je4NfoM0PsM5xhc0wKLfKEeR4tElAgJrB9CygOcd0rirld78LlLUYMLCzM8uwSoX/4IrKx8DOfNqIf0k3aa0jBf1r9O0PymfpvWstqCv36+cMXOCIvT+HAqfplO+LqWv9YPAEBBntYEBEvfvo1CPPtHf8H5CT7pB9GvD4662T3+kHWbQ7xu4qdB3lw5VAith3H3amn//nnZ0in4dIpaiPd867ytaO/1kYaFd2N3dydPsywzdX8ts0o2tnXV5yyQkDtYXwXrcx75bkODTrQ/9P6+4X4iUP0sFNl+Hs3JAnpZHcdEWKlQM1fHuw/CRhn6g7foUjh7ndsqPO/U/IkwFyaUQOiDm7s2VMSq05DoNXoVI4vETulULNJ5Q1zlm78S7CnO5y9L/yfIJKG0IVfcqWqmuVEcup1I1UVdeJVl12qkdzPOHGOue7ymllV46XsGrcFTSElSL0L0wmk96yniq7rjaU845pjVmy/+eOCqtCubSipCMWoJ2WJPTOnM9SaVXkdTyWt7y24+VgkX4xxV5rddOX33yks/8dp2HhoLpi0x+fzuTpXuZfL64m1sDFg2auclazKM1qmBsciWalHUYZQ1W+/nP8PrDiozjUUExV1HLcqSYuAfTq73lsJ4trOdoYXO9a34ZOn7qlMB2fn17P79FV3fzeTnYeyBvAr3HpDx0XIvmIZI6zQsYm8ymVafCaIvVhFzwNRb04sgDPlVXQI0QJFqtM9vmrU9VK0jgzyuSggG6b9XplvLoplfx7JqgxQx98Ojx4sPFxYdL6cdp7YVus5YDkhDHNpOiTle9phJzNMtsJaBkUC3gMBrGbB+zYGk/eLIwHyztDCK9tkVf7O9ktkQEwUMEd3Yza1vZjWQyGdTxbyvKQJ8Cj2TCxGEPhi0y8as//R1nUVRFHNdzud0coPcyTjc8mwE6y1SmUPcp3XSkslmb02cw2XOgSEh8vf9iBzaX/Houk92AcEAX+PIy6MTDyj93bOXUl2nnGNrXnqfyBC2mlx4l0/OPkvMLyS8X6aMH9LG+5mhQgbPPhS2o6MyhnABsO+B/LtDwGdDYih4zu4Zt3hSuaM/RFq8AtBUG6DW7k5onw9ZYAjcCoZHnYAgXr0KYtAhygCERn8PDw6R8goGfAgnKA40MPZSxLUZf8DaTHk6LvCKF4vQlxrPKbViBq7Bgq+G5GGv1HL0UYQB/DYMoU79wVQQPukssgsdcBe/sGL5suwdSOOnCQSYC7/0YSah
BoiL+KWAA3/1o6Jx+N8NxOT+LGWT99NAf7KI53QA40Q3xSj6A8YB+RW8ngw/Nw25nw8YChIXBrwm6w5qYVCgyO4T6EzjUXc6ssu21y21P8SOFHln1jSdcrhCzgutxFT8jqflKtLnC/wbVLiU7LaGU7mHlakMub3Em+SzG3Bsktq9VUyNw67jCVmXYMJQn22EViTlqxo15U2ONwtEQ66c+1pB6w9QjwIi3KEkAhu/1PwyAWw2QxiCBFoUCfNBFeW+QEzzjYHRZH0MMeRbd5XR6A37cwq7p0FtcKe5KzbZETSiphjFez64qjykB6wGjajgQP8oA5JbT9q2wdqfFZzFwjUZb1rSsxRTrR2ESbjVuQ80UL3tuq9xhqn6EAmiLGNeMGaMaNlwsBOKXPhDYKJA/3CB3aD9ExURUcIxi7pD4uxI5TmL6PjKQ3rd+oyLrHEIGSKkh5E4ju2jfIVE0ENdn4J8Q470J/Qimh8mJGRxS8QhchB5FGyPa3WjX5piO6xvdwRpwOQPjxnqBpu5Iulko7KXSyXR8czdfWKZ3ZDwuH+q95lVBWLDKM7AOtx2bN6WgGXzVQEO/5S7d5m3YjJvChtZe2y1mD5bp7DjWDxspN90tolOFfUQXMfOD8lZ2vaArl999vl3OF3LrmRf6WdWBd0FVKb2XdnSBcEfShOS2pYk6bJi6t7nrOq7fKNgysu4pyzm0FS6k/OZ+oZxbK+b89dByoOMjoWRYUd0J8EbUq5b3twFGzpRi1eZ75E7r2g8aQ+YlM0IixgVzxjhvJMwJQz96dfsAoEFpmseXLbeoY1Nti85KTPdzPOjiFg+7P4KE09s5/sbjUsllqv2BZo2r2o+nfrvx13ffrczeQxzP45DAYTRsYHksdaL+FgArJIcA+mu+xXnHp2ABCE3h7sN1nkKrueywLOyOpzQgneB1qAUauevoCdsjh6cEvSfgZuQGkWr/Le4vNgj3rWbAmtMMfiSUXxR/t+m0WJWrUb2iFNaucoyvO/20xgTHGurEUXkH+iQXOdM/ubv4EOFjrVbZT9ffAo78pJAsaLbuG5X2U8cS+BXsdLB1xrAPPNeFuAMruckzW+CZ0iMqT9sOHFH4Mw33r2EYMbqfaMwYNT7xM2PjMEZiRkxzBswLID0AP5xFzZgxfkZ+MqRjA4b+mBdhHDIuGheCcdqcMqZQIhIxlmA+gVoRsBgdGh6KDU1+YaKP80YcJCbNUWPM+DePKMMV"))))
| 663.8
| 3,242
| 0.963845
| 104
| 3,319
| 30.759615
| 0.971154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155777
| 0.003917
| 3,319
| 4
| 3,243
| 829.75
| 0.811857
| 0.013558
| 0
| 0
| 0
| 0.5
| 0.973105
| 0.973105
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
68ac5c4d5195c701459ef498a7213479034cefcb
| 6,472
|
py
|
Python
|
src/foxdot/research/ryan-kirkbride/180430_1259_noodling_around.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | 4
|
2018-06-29T18:39:34.000Z
|
2021-06-20T16:44:29.000Z
|
src/foxdot/research/ryan-kirkbride/180430_1259_noodling_around.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | null | null | null |
src/foxdot/research/ryan-kirkbride/180430_1259_noodling_around.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | null | null | null |
"""
Ryan Kirkbride - Noodling around:
https://www.youtube.com/watch?v=CXrkq7u69vU
How to:
- Run the statements line by line (alt+enter),
go to the next one whenever you feel like
- The "#### > run block <" blocks should be
executed together (ctrl+enter)
- If you want to fast-forward through the song,
just execute the blocks together (ctrl+enter)
from the beginning, so you don't have to go
through every variation of each instrument
- Enjoy ! :+1:
"""
Scale.default = Scale.minor
Root.default = -4
Clock.bpm = 136
d1 >> play(P["x---o---"],)
d1 >> play(P["x---o---"].layer("mirror"),pan=(-1,1),)
d1 >> play(P["x--(-[--])o---"].layer("mirror"),pan=(-1,1),)
d1 >> play(P["x--(-[--])o--(-=)"].layer("mirror"),pan=(-1,1),)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),)
d2 >> play(PZip("Vs"," n "),sample=1,)
d2 >> play(PZip("Vs"," n "),sample=2,)
d2 >> play(PZip("Vs"," n "),sample=2,).every(3,"stutter")
d2 >> play(PZip("Vs"," n "),sample=2,).every(3,"stutter",dur=1)
d2 >> play(PZip("Vs"," n "),sample=2,hpf=var([0,4000],[28,4]),).every(3,"stutter",dur=1)
b1 >> dirt(var([0,2,-1,3]),)
b1 >> dirt(var([0,2,-1,3]),dur=PDur(3,8),)
b1 >> dirt(var([0,2,-1,3]),dur=PDur(3,8),bits=4,)
b1 >> dirt(var([0,2,-1,3]),dur=PDur(3,8),bits=4,lpf=80,)
b1 >> dirt(var([0,2,-1,3]),dur=PDur(3,8),bits=4,lpf=80,fmod=(0,1),)
k1 >> karp()
k1 >> karp(oct=6,)
k1 >> karp(dur=1/4,oct=6,)
k1 >> karp(dur=1/4,oct=var([6,7]),)
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1/2,)
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,)
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32],) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32]*(1,2),) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32]*(1,2),delay=(0,1/8),) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32]*(1,2),delay=(0,1/8),lpf=linvar([400,5000],12),) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32]*(1,2),delay=(0,1/8),lpf=linvar([400,5000],12),pan=linvar([-1,1],8),) + var([0,-1,1,0])
k1 >> karp(dur=1/4,oct=var([6,7]),sus=1,rate=P[:32]*(1,2),delay=(0,1/8),lpf=linvar([400,5000],12),pan=linvar([-1,1],8),) + var([0,-1,1,-7])
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),).every(5,"stutter",4,pan=[-1,1])
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),).every(5,"stutter",4,pan=[-1,1],rate=4)
p1 >> blip([0,4,7,9],)
p1 >> blip([0,4,7,9],oct=6,)
p1 >> blip([0,4,7,9],oct=6,sus=2,)
p1 >> blip([0,4,7,9],oct=6,sus=2,dur=1/2,)
p1 >> blip([var([0,-1,1,0]),4,7,9],oct=6,sus=2,dur=1/2,)
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=6,sus=2,dur=1/2,)
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=7,sus=2,dur=1/2,)
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=(6,7),sus=2,dur=1/2,)
d3 >> play("[--]")
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=(6,7),sus=2,dur=PDur(5,8),)
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=(6,7),sus=2,dur=PDur(5,8),chop=4)
p1 >> blip([var([0,-1,1,0]),4,[7,10],9],oct=(6),sus=2,dur=PDur(5,8),chop=4)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),).every(5,"stutter",0,pan=[-1,1],rate=4)
k1.stop()
d2.solo()
Scale.default = "major"
s1 >> swell((0,2,4,const(6)),dur=4,)
s1 >> swell((0,2,4,const(6)),dur=4,) + var([0,1],8)
s1 >> swell((0,2,4,const(6)),dur=4,) + var([0,-1],8)
s1 >> swell((0,2,4,const(6)),dur=4,) + var([0,1],8)
Scale.default = Pvar([Scale.major,Scale.minor],16)
s1 >> swell((0,2,4,const(6)),dur=4,) + var([0,[1,-1]],8)
s1.solo()
b1 >> dirt(var([0,[1,-1]],8),dur=PDur(3,8),bits=4,lpf=80,fmod=(0,1),)
b1 >> dirt(var([0,[1,-1]],8),dur=PDur(3,8),bits=0,lpf=80,fmod=(0,1),)
b1 >> dirt(var([0,[1,-1]],8),dur=PDur(3,8),bits=0,lpf=0,fmod=(0,1),)
b1 >> bass(var([0,[1,-1]],8),dur=PDur(3,8),bits=0,lpf=0,fmod=(0,1),)
b1 >> bass(var([0,[1,-1]],8),dur=PDur(3,8),bits=0,lpf=0,fmod=(0,0),)
b1 >> bass(var([0,[1,-1]],8),dur=PDur(3,8),bits=0,lpf=0,) + [0,4,const(7)]
b1 >> bass(var([0,[1,-1]],8),dur=PDur(5,8),bits=0,lpf=0,) + [0,4,const(7)]
b1 >> bass(var([0,[1,-1]],8),dur=PDur(5,12),bits=0,lpf=0,) + [0,4,const(7)]
d2 >> play(PZip("Vs"," n "),sample=2,hpf=var([0,4000],[28,4]),).every(3,"stutter",dur=1)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),)
k2 >> karp([0,7,6,4,2],)
k2 >> karp([0,7,6,4,2],sus=2,)
k2 >> karp([0,7,6,4,2],sus=2,dur=PDur(5,8),chop=4,)
k2 >> karp([0,7,6,4,2],sus=2,dur=PDur(5,8),chop=4,oct=7,)
k2 >> karp(P[var([0,1],8),7,6,4,2],sus=2,dur=PDur(5,8),chop=4,oct=7,)
k2 >> karp(P[var([0,1],8),7,6,4,2].layer("mirror"),sus=2,dur=PDur(5,8),chop=4,oct=7,)
k2 >> karp(P[var([0,1],8),7,6,4,2].layer("mirror"),sus=2,dur=PDur(5,8),chop=4,oct=7,delay=(0,0.25),)
k2.solo()
b1 >> bass(var([0,[1,-1]],8),dur=PDur(5,12),bits=0,lpf=0,) + [0,4,const(7)]
d2 >> play(PZip("Vs"," D "),sample=0,hpf=var([0,4000],[28,4]),).every(3,"stutter",dur=1)
d2 >> play(PZip("Vs"," D D"),dur=PDur(5,8),sample=0,hpf=var([0,4000],[28,4]),).every(3,"stutter",dur=1)
d2 >> play(PZip("Vs"," i i"),dur=PDur(5,8),sample=0,hpf=var([0,4000],[28,4]),).every(3,"stutter",dur=1)
s1 >> swell((0,2,4,const(6)),dur=4,) + var([0,[1,-1]],8)
d3 >> play("[--]")
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),chop=32,)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),chop=32,bits=4,)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),chop=32,bits=4,slide=PStep(5,-1),)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sample=-1,rate=var([1,4],[28,4]),chop=8,bits=4,slide=PStep(5,-1),)
d1 >> play(P["x--(-[--])o--o(-=)-"].layer("mirror"),pan=(-1,1),dur=PDur(5,8),sus=1,sample=-1,rate=var([1,4],[28,4]),chop=8,bits=4,slide=PStep(5,-1),)
k2.solo()
k2.solo(0)
p1.stop()
Group(k2, s1, d2, d3).only()
Group(s1, d3).stop()
nextBar(Clock.clear)
| 45.577465
| 150
| 0.536001
| 1,463
| 6,472
| 2.371155
| 0.090226
| 0.026521
| 0.044681
| 0.04324
| 0.836552
| 0.829057
| 0.819833
| 0.816085
| 0.793312
| 0.780917
| 0
| 0.140693
| 0.072002
| 6,472
| 141
| 151
| 45.900709
| 0.436896
| 0.070457
| 0
| 0.159574
| 0
| 0
| 0.090423
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cc2e99720ed641f2f4e761a469c76abf04bc43be
| 24,461
|
py
|
Python
|
django_evolution/tests/test_change_meta.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
django_evolution/tests/test_change_meta.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
django_evolution/tests/test_change_meta.py
|
kamrankalantarli/django-evolution
|
3e67426b189aecca5e470607838d1191f4892859
|
[
"BSD-3-Clause"
] | null | null | null |
"""Unit tests for the ChangeMeta mutation."""
from __future__ import unicode_literals
from django.db import models
from nose import SkipTest
try:
# Django >= 1.11
from django.db.models import Index
except ImportError:
# Django <= 1.10
Index = None
from django_evolution.mutations import ChangeMeta
from django_evolution.support import supports_indexes, supports_index_together
from django_evolution.tests.base_test_case import EvolutionTestCase
class ChangeMetaPlainBaseModel(models.Model):
    """Baseline model with no Meta options.

    Used as the starting state for the "setting from empty" and
    "keeping empty" evolution tests below.
    """

    int_field1 = models.IntegerField()
    int_field2 = models.IntegerField()
    char_field1 = models.CharField(max_length=20)
    char_field2 = models.CharField(max_length=40)
class ChangeMetaIndexesBaseModel(models.Model):
    """Starting state for tests that mutate an existing Meta.indexes list."""

    int_field1 = models.IntegerField()
    int_field2 = models.IntegerField()
    char_field1 = models.CharField(max_length=20)
    char_field2 = models.CharField(max_length=40)

    class Meta:
        # Index is None on Django <= 1.10 (see the import guard above),
        # so only declare the option when the feature exists. The tests
        # themselves are skipped in that case via supports_indexes.
        if Index:
            indexes = [
                Index(fields=['int_field1']),
                Index(fields=['char_field1', '-char_field2'],
                      name='my_custom_index'),
            ]
class ChangeMetaIndexTogetherBaseModel(models.Model):
    """Starting state for tests that mutate an existing index_together."""

    int_field1 = models.IntegerField()
    int_field2 = models.IntegerField()
    char_field1 = models.CharField(max_length=20)
    char_field2 = models.CharField(max_length=40)

    class Meta:
        index_together = [('int_field1', 'char_field1')]
class ChangeMetaUniqueTogetherBaseModel(models.Model):
    """Starting state for tests that mutate an existing unique_together."""

    int_field1 = models.IntegerField()
    int_field2 = models.IntegerField()
    char_field1 = models.CharField(max_length=20)
    char_field2 = models.CharField(max_length=40)

    class Meta:
        unique_together = [('int_field1', 'char_field1')]
class ChangeMetaIndexesTests(EvolutionTestCase):
    """Unit tests for ChangeMeta with indexes."""

    # Key used by the test harness to look up expected SQL output.
    sql_mapping_key = 'indexes'

    # Expected human-readable diff whenever Meta.indexes changes.
    DIFF_TEXT = (
        "In model tests.TestModel:\n"
        " Meta property 'indexes' has changed"
    )

    @classmethod
    def setUpClass(cls):
        super(ChangeMetaIndexesTests, cls).setUpClass()

        # Meta.indexes only exists on Django >= 1.11; skip the whole class
        # otherwise rather than failing each test.
        if not supports_indexes:
            raise SkipTest('Meta.indexes is not supported on this version '
                           'of Django')

    def test_keeping_empty(self):
        """Testing ChangeMeta(indexes) and keeping list empty"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                indexes = []

        self.set_base_model(ChangeMetaPlainBaseModel)
        # [] -> [] is a no-op: no diff, no evolution, no SQL expected.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'indexes', []),
            ],
            None,
            None,
            None,
            expect_noop=True)

    def test_setting_from_empty(self):
        """Testing ChangeMeta(indexes) and setting to valid list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                indexes = [
                    Index(fields=['int_field1']),
                    Index(fields=['char_field1', '-char_field2'],
                          name='my_custom_index'),
                ]

        self.set_base_model(ChangeMetaPlainBaseModel)
        # Note: the mutation describes indexes as plain dicts, not Index
        # instances; the serialized hint string below must match exactly.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta(
                    'TestModel',
                    'indexes',
                    [
                        {
                            'fields': ['int_field1'],
                        },
                        {
                            'fields': ['char_field1', '-char_field2'],
                            'name': 'my_custom_index',
                        },
                    ])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'indexes',"
                " [{'fields': ['int_field1']},"
                " {'fields': ['char_field1', '-char_field2'],"
                " 'name': 'my_custom_index'}])"
            ],
            'setting_from_empty')

    def test_replace_list(self):
        """Testing ChangeMeta(indexes) and replacing list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                indexes = [
                    Index(fields=['int_field2']),
                ]

        self.set_base_model(ChangeMetaIndexesBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'indexes',
                           [{'fields': ['int_field2']}])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'indexes',"
                " [{'fields': ['int_field2']}])"
            ],
            'replace_list')

    def test_append_list(self):
        """Testing ChangeMeta(indexes) and appending list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                indexes = [
                    Index(fields=['int_field1']),
                    Index(fields=['char_field1', '-char_field2'],
                          name='my_custom_index'),
                    Index(fields=['int_field2']),
                ]

        self.set_base_model(ChangeMetaIndexesBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta(
                    'TestModel',
                    'indexes',
                    [
                        {
                            'fields': ['int_field1'],
                        },
                        {
                            'fields': ['char_field1', '-char_field2'],
                            'name': 'my_custom_index',
                        },
                        {
                            'fields': ['int_field2'],
                        },
                    ])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'indexes',"
                " [{'fields': ['int_field1']},"
                " {'fields': ['char_field1', '-char_field2'],"
                " 'name': 'my_custom_index'},"
                " {'fields': ['int_field2']}])"
            ],
            'append_list')

    def test_removing(self):
        """Testing ChangeMeta(indexes) and removing property"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

        self.set_base_model(ChangeMetaIndexesBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'indexes', [])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'indexes', [])"
            ],
            'removing')

    def test_missing_indexes(self):
        """Testing ChangeMeta(indexes) and old missing indexes"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                indexes = [
                    Index(fields=['int_field2']),
                ]

        self.set_base_model(ChangeMetaIndexesBaseModel)

        # Remove the indexes from the database state, to simulate the indexes
        # not being found in the database. The evolution should still work.
        self.database_state.clear_indexes('tests_testmodel')

        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'indexes',
                           [{'fields': ['int_field2']}])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'indexes',"
                " [{'fields': ['int_field2']}])"
            ],
            'ignore_missing_indexes',
            rescan_indexes=False)
class ChangeMetaIndexTogetherTests(EvolutionTestCase):
    """Unit tests for ChangeMeta with index_together."""

    # Key used by the test harness to look up expected SQL output.
    sql_mapping_key = 'index_together'

    # Expected human-readable diff whenever Meta.index_together changes.
    DIFF_TEXT = (
        "In model tests.TestModel:\n"
        " Meta property 'index_together' has changed"
    )

    @classmethod
    def setUpClass(cls):
        super(ChangeMetaIndexTogetherTests, cls).setUpClass()

        # index_together support varies by Django version; skip the whole
        # class rather than failing each test.
        if not supports_index_together:
            raise SkipTest('Meta.index_together is not supported on this '
                           'version of Django')

    def test_keeping_empty(self):
        """Testing ChangeMeta(index_together) and keeping list empty"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                index_together = []

        self.set_base_model(ChangeMetaPlainBaseModel)
        # [] -> [] is a no-op: no diff, no evolution, no SQL expected.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together', []),
            ],
            None,
            None,
            None,
            expect_noop=True)

    def test_setting_from_empty(self):
        """Testing ChangeMeta(index_together) and setting to valid list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                index_together = [('int_field1', 'char_field1')]

        self.set_base_model(ChangeMetaPlainBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together',
                           [('int_field1', 'char_field1')]),
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'index_together',"
                " [('int_field1', 'char_field1')])"
            ],
            'setting_from_empty')

    def test_replace_list(self):
        """Testing ChangeMeta(index_together) and replacing list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                index_together = [('int_field2', 'char_field2')]

        self.set_base_model(ChangeMetaIndexTogetherBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together',
                           [('int_field2', 'char_field2')]),
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'index_together',"
                " [('int_field2', 'char_field2')])"
            ],
            'replace_list')

    def test_append_list(self):
        """Testing ChangeMeta(index_together) and appending list"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                index_together = [('int_field1', 'char_field1'),
                                  ('int_field2', 'char_field2')]

        self.set_base_model(ChangeMetaIndexTogetherBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together',
                           [('int_field1', 'char_field1'),
                            ('int_field2', 'char_field2')]),
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'index_together',"
                " [('int_field1', 'char_field1'),"
                " ('int_field2', 'char_field2')])"
            ],
            'append_list')

    def test_removing(self):
        """Testing ChangeMeta(index_together) and removing property"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

        self.set_base_model(ChangeMetaIndexTogetherBaseModel)
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together', [])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'index_together', [])"
            ],
            'removing')

    def test_missing_indexes(self):
        """Testing ChangeMeta(index_together) and old missing indexes"""
        class DestModel(models.Model):
            int_field1 = models.IntegerField()
            int_field2 = models.IntegerField()
            char_field1 = models.CharField(max_length=20)
            char_field2 = models.CharField(max_length=40)

            class Meta:
                index_together = [('char_field1', 'char_field2')]

        self.set_base_model(ChangeMetaIndexTogetherBaseModel)

        # Remove the indexes from the database state, to simulate the indexes
        # not being found in the database. The evolution should still work.
        self.database_state.clear_indexes('tests_testmodel')

        self.perform_evolution_tests(
            DestModel,
            [
                ChangeMeta('TestModel', 'index_together',
                           [('char_field1', 'char_field2')])
            ],
            self.DIFF_TEXT,
            [
                "ChangeMeta('TestModel', 'index_together',"
                " [('char_field1', 'char_field2')])"
            ],
            'ignore_missing_indexes',
            rescan_indexes=False)
class ChangeMetaUniqueTogetherTests(EvolutionTestCase):
"""Unit tests for ChangeMeta with unique_together."""
sql_mapping_key = 'unique_together'
DIFF_TEXT = (
"In model tests.TestModel:\n"
" Meta property 'unique_together' has changed"
)
def test_keeping_empty(self):
"""Testing ChangeMeta(unique_together) and keeping list empty"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = []
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together', []),
],
None,
None,
None,
expect_noop=True)
def test_setting_from_empty(self):
"""Testing ChangeMeta(unique_together) and setting to valid list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1')]
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1')])"
],
'setting_from_empty')
def test_replace_list(self):
"""Testing ChangeMeta(unique_together) and replacing list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field2', 'char_field2')])"
],
'replace_list')
def test_append_list(self):
"""Testing ChangeMeta(unique_together) and appending list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1'),
('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1'),
('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1'),"
" ('int_field2', 'char_field2')])"
],
'append_list')
def test_removing(self):
"""Testing ChangeMeta(unique_together) and removing property"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together', [])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together', [])"
],
'removing')
def test_set_remove(self):
    """Testing ChangeMeta(unique_together) and setting indexes and removing
    one
    """
    class DestModel(models.Model):
        int_field1 = models.IntegerField()
        int_field2 = models.IntegerField()
        char_field1 = models.CharField(max_length=20)
        char_field2 = models.CharField(max_length=40)

        class Meta:
            unique_together = [('int_field1', 'char_field1')]

    self.set_base_model(ChangeMetaPlainBaseModel)
    # Two mutations applied in sequence: first set two constraints, then
    # remove the second one again. Only the final state appears in the
    # expected hint below.
    self.perform_evolution_tests(
        DestModel,
        [
            ChangeMeta('TestModel', 'unique_together',
                       [('int_field1', 'char_field1'),
                        ('int_field2', 'char_field2')]),
            ChangeMeta('TestModel', 'unique_together',
                       [('int_field1', 'char_field1')])
        ],
        self.DIFF_TEXT,
        [
            "ChangeMeta('TestModel', 'unique_together',"
            " [('int_field1', 'char_field1')])"
        ],
        'set_remove')
def test_missing_indexes(self):
    """Testing ChangeMeta(unique_together) and old missing indexes"""
    class DestModel(models.Model):
        int_field1 = models.IntegerField()
        int_field2 = models.IntegerField()
        char_field1 = models.CharField(max_length=20)
        char_field2 = models.CharField(max_length=40)

        class Meta:
            unique_together = [('char_field1', 'char_field2')]

    self.set_base_model(ChangeMetaUniqueTogetherBaseModel)

    # Remove the indexes from the database state, to simulate the indexes
    # not being found in the database. The evolution should still work.
    self.database_state.clear_indexes('tests_testmodel')

    self.perform_evolution_tests(
        DestModel,
        [
            ChangeMeta('TestModel', 'unique_together',
                       [('char_field1', 'char_field2')])
        ],
        self.DIFF_TEXT,
        [
            "ChangeMeta('TestModel', 'unique_together',"
            " [('char_field1', 'char_field2')])"
        ],
        'ignore_missing_indexes',
        # Don't rescan: the cleared database state must stay cleared for
        # the "missing indexes" condition to hold.
        rescan_indexes=False)
def test_upgrade_from_v1_sig_no_indexes(self):
    """Testing ChangeMeta(unique_together) and upgrade from v1 signature
    with no changes and no indexes in database"""
    class DestModel(models.Model):
        int_field1 = models.IntegerField()
        int_field2 = models.IntegerField()
        char_field1 = models.CharField(max_length=20)
        char_field2 = models.CharField(max_length=40)

        class Meta:
            unique_together = [('int_field1', 'char_field1')]

    self.set_base_model(ChangeMetaPlainBaseModel)

    # Pretend this is an older signature with the same unique_together.
    model_sig = (
        self.start_sig
        .get_app_sig('tests')
        .get_model_sig('TestModel')
    )
    model_sig.unique_together = DestModel._meta.unique_together
    # Mark the constraint as never applied, as a v1-era signature would
    # have recorded it.
    model_sig._unique_together_applied = False

    self.perform_evolution_tests(
        DestModel,
        [
            ChangeMeta('TestModel', 'unique_together',
                       [('int_field1', 'char_field1')])
        ],
        self.DIFF_TEXT,
        [
            "ChangeMeta('TestModel', 'unique_together',"
            " [('int_field1', 'char_field1')])"
        ],
        'upgrade_from_v1_sig',
        rescan_indexes=False)
def test_upgrade_from_v1_sig_with_indexes(self):
    """Testing ChangeMeta(unique_together) and upgrade from v1 signature
    with no changes and with indexes in database"""
    class DestModel(models.Model):
        int_field1 = models.IntegerField()
        int_field2 = models.IntegerField()
        char_field1 = models.CharField(max_length=20)
        char_field2 = models.CharField(max_length=40)

        class Meta:
            unique_together = [('int_field1', 'char_field1')]

    self.set_base_model(ChangeMetaUniqueTogetherBaseModel)

    # Pretend this is an older signature with the same unique_together.
    model_sig = (
        self.start_sig
        .get_app_sig('tests')
        .get_model_sig('TestModel')
    )
    model_sig._unique_together_applied = False

    self.perform_evolution_tests(
        DestModel,
        [
            ChangeMeta('TestModel', 'unique_together',
                       [('int_field1', 'char_field1')])
        ],
        self.DIFF_TEXT,
        [
            "ChangeMeta('TestModel', 'unique_together',"
            " [('int_field1', 'char_field1')])"
        ],
        # No SQL mapping name here (unlike the no-indexes variant) --
        # presumably no schema SQL is expected; TODO confirm against
        # perform_evolution_tests' handling of None.
        None,
        rescan_indexes=False)
| 34.795164
| 79
| 0.54544
| 2,145
| 24,461
| 5.945921
| 0.062937
| 0.048612
| 0.070566
| 0.094088
| 0.924886
| 0.917124
| 0.899796
| 0.867257
| 0.855653
| 0.836365
| 0
| 0.020635
| 0.352152
| 24,461
| 702
| 80
| 34.844729
| 0.784186
| 0.082785
| 0
| 0.784698
| 0
| 0
| 0.160814
| 0.021556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040925
| false
| 0
| 0.014235
| 0
| 0.181495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04130d9d72f2d608f13024f5b2a9a61a79251c97
| 136
|
py
|
Python
|
numlab/automata/__init__.py
|
jmorgadov/NumLab
|
96a3771837b87132674e65ec3bb1f0ab5f5f089f
|
[
"MIT"
] | 9
|
2022-01-19T22:40:58.000Z
|
2022-02-24T02:39:51.000Z
|
numlab/automata/__init__.py
|
jmorgadov/NumLab
|
96a3771837b87132674e65ec3bb1f0ab5f5f089f
|
[
"MIT"
] | 41
|
2021-11-09T18:22:10.000Z
|
2022-02-06T19:04:23.000Z
|
numlab/automata/__init__.py
|
jmorgadov/NumLab
|
96a3771837b87132674e65ec3bb1f0ab5f5f089f
|
[
"MIT"
] | null | null | null |
from numlab.automata.state import State
from numlab.automata.transition import Transition
from numlab.automata.automata import Automata
| 34
| 49
| 0.867647
| 18
| 136
| 6.555556
| 0.333333
| 0.254237
| 0.457627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 136
| 3
| 50
| 45.333333
| 0.951613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f0cc6880c8e98aa27ccff9238dd4b05cc8c7677f
| 13,419
|
py
|
Python
|
tools/valuation.py
|
zq7734509/mmsegmentation-multi-layer
|
4f2f5309464d1808a7cce78aa83839df85fcd5a0
|
[
"Apache-2.0"
] | 1
|
2022-01-12T07:42:40.000Z
|
2022-01-12T07:42:40.000Z
|
tools/valuation.py
|
zq7734509/mmsegmentation-multi-layer
|
4f2f5309464d1808a7cce78aa83839df85fcd5a0
|
[
"Apache-2.0"
] | null | null | null |
tools/valuation.py
|
zq7734509/mmsegmentation-multi-layer
|
4f2f5309464d1808a7cce78aa83839df85fcd5a0
|
[
"Apache-2.0"
] | 1
|
2022-02-05T17:31:40.000Z
|
2022-02-05T17:31:40.000Z
|
import numpy as np
import os
import glob
from PIL import Image
import cv2 as cv
import os
from sklearn.metrics import confusion_matrix,cohen_kappa_score
from skimage import io
from skimage import measure
from scipy import ndimage
from scipy import misc
from sklearn.metrics import f1_score
from libtiff import TIFF
def evaluateWithBoundary(data_root):
    """Evaluate predicted label maps against ground truth, boundary included.

    For every image listed under ``data_root + 'rgb_img/'`` this loads the
    prediction (``grey_img/<name>_pred.png``) and the ground-truth label
    (``grey_img/<name>_label.png``), builds a per-class confusion matrix,
    and reports per-image and overall precision / recall / F1 / overall
    accuracy both to stdout and to ``data_root + 'evaluationWithBoundary.txt'``.

    Grey value 0 is treated as an "other" label and is excluded from the
    scores whenever it occurs in the ground truth.

    Args:
        data_root (str): Directory containing the ``rgb_img/`` and
            ``grey_img/`` sub-directories; expected to end with '/'.
    """
    separator = '---------------------------------------------------------------------------'
    for _ in range(4):
        print(separator)
    print('evaluateWithBoundary')

    os.chdir(data_root + 'rgb_img/')
    listName = os.listdir(os.getcwd())

    classNum = 5  # build, car, grass, imp, tree (see the column header below)

    # np.int / np.float were removed in NumPy >= 1.24; use the builtins.
    confusion_matrix_total = np.zeros((classNum, classNum), dtype=int)
    right_total = 0   # correctly classified pixels over all images
    sum_total = 0     # scored (non-"other") pixels over all images

    def _row(prefix, values):
        # Rebuild rows such as 'Precision\t0.9\t...' exactly as the
        # original manual '+'-concatenation produced them.
        return prefix + '\t'.join(str(v) for v in values)

    num_total = len(listName)
    with open(data_root + 'evaluationWithBoundary.txt', 'w') as out_txt:
        for num_cur, name in enumerate(listName, start=1):
            print('Evaluating process: ' + str(num_cur) + '/' + str(num_total))

            # name[:-8] strips an 8-character suffix from the rgb file
            # name -- presumably '_rgb.png'; TODO confirm.
            base = name[:-8]
            pred_grey = np.asarray(
                Image.open(data_root + 'grey_img/' + base + '_pred.png'))
            gt_grey = np.asarray(
                Image.open(data_root + 'grey_img/' + base + '_label.png'))

            out_txt.write(base + '_label.png\n')
            out_txt.write('\t\tbuild\tcar\tgrass\timp\ttree\n')
            print(base + '_label.png')
            print('\t\tbuild\tcar\tgrass\timp\ttree')

            # NOTE(review): sklearn sizes the matrix by the labels that
            # actually occur; if a class is absent from an image the
            # 5x5 indexing below misaligns. Consider passing labels=
            # explicitly to confusion_matrix.
            matrix = confusion_matrix(y_true=gt_grey.flatten(),
                                      y_pred=pred_grey.flatten())

            other_label_count = 0
            if (gt_grey == 0).any():
                # Row/column 0 hold the "other" label; drop them and
                # remember how many ground-truth pixels they covered.
                other_label_count = matrix[0, :].sum()
                matrix_except_red = matrix[1:, 1:]
            else:
                matrix_except_red = matrix

            # Rows: precision / recall / F1; one column per class.
            # (Dead locals from the original -- matrix_normalized, aaa,
            # bbb, other_label_count_1 -- were removed.)
            matrix_cur = np.zeros((3, classNum))
            diag = np.diag(matrix_except_red)
            for j in range(classNum):
                precision = diag[j] / matrix_except_red[:, j].sum()
                recall = diag[j] / matrix_except_red[j, :].sum()
                f1 = 2 * precision * recall / (precision + recall)
                matrix_cur[0, j] = round(precision, 3)
                matrix_cur[1, j] = round(recall, 3)
                matrix_cur[2, j] = round(f1, 3)

            out_txt.write(_row('Precision\t', matrix_cur[0]) + '\n')
            out_txt.write(_row('Recall\t\t', matrix_cur[1]) + '\n')
            out_txt.write(_row('F1_score\t', matrix_cur[2]) + '\n')
            print(_row('Precision\t', matrix_cur[0]))
            print(_row('Recall\t\t', matrix_cur[1]))
            print(_row('F1_score\t', matrix_cur[2]))

            scored_pixels = (pred_grey.shape[0] * pred_grey.shape[1]
                             - other_label_count)
            oa = diag.sum() / scored_pixels
            out_txt.write('Overall accuracy: ' + str(round(oa, 5)) + '\n')
            out_txt.write('-----------------------------------------------------------------------------\n')
            out_txt.write('\n')
            print('Overall accuracy: ' + str(round(oa, 5)))
            print('-----------------------------------------------------------------------------')
            print('\n')

            confusion_matrix_total += matrix_except_red
            right_total += diag.sum()
            sum_total += scored_pixels

        # Aggregate metrics over the accumulated confusion matrix.
        out_txt.write('\n')
        out_txt.write('\n')
        out_txt.write('In Total:\n')
        print('In Total:')

        matrix_total = np.zeros((3, classNum))
        diag_total = np.diag(confusion_matrix_total)
        for j in range(classNum):
            precision = diag_total[j] / confusion_matrix_total[:, j].sum()
            recall = diag_total[j] / confusion_matrix_total[j, :].sum()
            f1 = 2 * precision * recall / (precision + recall)
            matrix_total[0, j] = round(precision, 3)
            matrix_total[1, j] = round(recall, 3)
            matrix_total[2, j] = round(f1, 3)

        out_txt.write(_row('Precision\t', matrix_total[0]) + '\n')
        out_txt.write(_row('Recall\t\t', matrix_total[1]) + '\n')
        out_txt.write(_row('F1_score\t', matrix_total[2]) + '\n')
        print(_row('Precision\t', matrix_total[0]))
        # NOTE(review): the original printed 'Recall\t' (single tab) in
        # this summary, unlike the per-image 'Recall\t\t'; preserved.
        print(_row('Recall\t', matrix_total[1]))
        print(_row('F1_score\t', matrix_total[2]))

        oa_total = right_total / sum_total
        out_txt.write('Overall accuracy: ' + str(round(oa_total, 5)) + '\n')
        print('Overall accuracy: ' + str(round(oa_total, 5)))
def evaluateWithNoBoundary(data_root):
    """Evaluate predictions against boundary-erased ground-truth labels.

    Identical to ``evaluateWithBoundary`` except that the ground truth is
    ``grey_img/<name>_label_no_boundary.png`` and row/column 0 of the
    confusion matrix (the "other"/erased-boundary label) is always dropped.
    Results go to stdout and ``data_root + 'evaluationWithNoBoundary.txt'``.

    Args:
        data_root (str): Directory containing the ``rgb_img/`` and
            ``grey_img/`` sub-directories; expected to end with '/'.
    """
    separator = '---------------------------------------------------------------------------'
    for _ in range(4):
        print(separator)
    print('evaluateWithNoBoundary')

    os.chdir(data_root + 'rgb_img/')
    listName = os.listdir(os.getcwd())

    classNum = 5  # build, car, grass, imp, tree (see the column header below)

    # np.int / np.float were removed in NumPy >= 1.24; use the builtins.
    confusion_matrix_total = np.zeros((classNum, classNum), dtype=int)
    right_total = 0   # correctly classified pixels over all images
    sum_total = 0     # scored (non-"other") pixels over all images

    def _row(prefix, values):
        # Rebuild rows such as 'Precision\t0.9\t...' exactly as the
        # original manual '+'-concatenation produced them.
        return prefix + '\t'.join(str(v) for v in values)

    num_total = len(listName)
    with open(data_root + 'evaluationWithNoBoundary.txt', 'w') as out_txt:
        for num_cur, name in enumerate(listName, start=1):
            print('Evaluating process: ' + str(num_cur) + '/' + str(num_total))

            # name[:-8] strips an 8-character suffix from the rgb file
            # name -- presumably '_rgb.png'; TODO confirm.
            base = name[:-8]
            pred_grey = np.asarray(
                Image.open(data_root + 'grey_img/' + base + '_pred.png'))
            gt_grey = np.asarray(
                Image.open(data_root + 'grey_img/' + base
                           + '_label_no_boundary.png'))

            out_txt.write(base + '_label.png\n')
            out_txt.write('\t\tbuild\tcar\tgrass\timp\ttree\n')
            print(base + '_label.png')
            print('\t\tbuild\tcar\tgrass\timp\ttree')

            # NOTE(review): sklearn sizes the matrix by the labels that
            # actually occur; if a class is absent from an image the
            # 5x5 indexing below misaligns. Consider passing labels=
            # explicitly to confusion_matrix.
            matrix = confusion_matrix(y_true=gt_grey.flatten(),
                                      y_pred=pred_grey.flatten())

            # Row/column 0 hold the erased-boundary "other" label; always
            # drop them here (the no-boundary ground truth contains them).
            other_label_count = matrix[0, :].sum()
            matrix_except_red = matrix[1:, 1:]

            # Rows: precision / recall / F1; one column per class.
            # (Dead locals from the original -- the pre-zeroed
            # matrix_except_red, matrix_normalized, other_label_count_1 --
            # were removed.)
            matrix_cur = np.zeros((3, classNum))
            diag = np.diag(matrix_except_red)
            for j in range(classNum):
                precision = diag[j] / matrix_except_red[:, j].sum()
                recall = diag[j] / matrix_except_red[j, :].sum()
                f1 = 2 * precision * recall / (precision + recall)
                matrix_cur[0, j] = round(precision, 3)
                matrix_cur[1, j] = round(recall, 3)
                matrix_cur[2, j] = round(f1, 3)

            out_txt.write(_row('Precision\t', matrix_cur[0]) + '\n')
            out_txt.write(_row('Recall\t\t', matrix_cur[1]) + '\n')
            out_txt.write(_row('F1_score\t', matrix_cur[2]) + '\n')
            print(_row('Precision\t', matrix_cur[0]))
            print(_row('Recall\t\t', matrix_cur[1]))
            print(_row('F1_score\t', matrix_cur[2]))

            scored_pixels = (pred_grey.shape[0] * pred_grey.shape[1]
                             - other_label_count)
            oa = diag.sum() / scored_pixels
            out_txt.write('Overall accuracy: ' + str(round(oa, 5)) + '\n')
            out_txt.write('-----------------------------------------------------------------------------\n')
            out_txt.write('\n')
            print('Overall accuracy: ' + str(round(oa, 5)))
            print('-----------------------------------------------------------------------------')
            print('\n')

            confusion_matrix_total += matrix_except_red
            right_total += diag.sum()
            sum_total += scored_pixels

        # Aggregate metrics over the accumulated confusion matrix.
        out_txt.write('\n')
        out_txt.write('\n')
        out_txt.write('In Total:\n')
        print('In Total:')

        matrix_total = np.zeros((3, classNum))
        diag_total = np.diag(confusion_matrix_total)
        for j in range(classNum):
            precision = diag_total[j] / confusion_matrix_total[:, j].sum()
            recall = diag_total[j] / confusion_matrix_total[j, :].sum()
            f1 = 2 * precision * recall / (precision + recall)
            matrix_total[0, j] = round(precision, 3)
            matrix_total[1, j] = round(recall, 3)
            matrix_total[2, j] = round(f1, 3)

        out_txt.write(_row('Precision\t', matrix_total[0]) + '\n')
        out_txt.write(_row('Recall\t\t', matrix_total[1]) + '\n')
        out_txt.write(_row('F1_score\t', matrix_total[2]) + '\n')
        print(_row('Precision\t', matrix_total[0]))
        # NOTE(review): the original printed 'Recall\t' (single tab) in
        # this summary, unlike the per-image 'Recall\t\t'; preserved.
        print(_row('Recall\t', matrix_total[1]))
        print(_row('F1_score\t', matrix_total[2]))

        oa_total = right_total / sum_total
        out_txt.write('Overall accuracy: ' + str(round(oa_total, 5)) + '\n')
        print('Overall accuracy: ' + str(round(oa_total, 5)))
if __name__ == '__main__':
    # Hard-coded experiment output directory; the trailing '/' is required
    # by the string concatenation inside the evaluate* functions.
    data_root = '/media/allen/orange/00第二篇论文实验结果/15.upernet_swin_base_potsdam_normal_boundary_loss_160k/'

    # Optionally convert RGB label images to grey-level class maps first,
    # via a compiled MATLAB helper invoked through the shell.
    need_transform_label_from_rgb2grey = True
    if need_transform_label_from_rgb2grey:
        os.system(
            'sh /home/allen/Documents/MATLAB/run_transform_RGBLabel2Grey.sh /usr/local/MATLAB/MATLAB_Runtime/v93/ ' + data_root)

    evaluateWithBoundary(data_root)
    evaluateWithNoBoundary(data_root)

    # Render red/green correctness visualisations, again via MATLAB.
    os.system(
        'sh /home/allen/Documents/MATLAB/run_generate_red_green.sh /usr/local/MATLAB/MATLAB_Runtime/v93/ ' + data_root)
| 57.840517
| 128
| 0.547209
| 1,899
| 13,419
| 3.647709
| 0.07425
| 0.069294
| 0.173235
| 0.112603
| 0.899957
| 0.891295
| 0.891295
| 0.876714
| 0.866031
| 0.854482
| 0
| 0.032962
| 0.204188
| 13,419
| 232
| 129
| 57.840517
| 0.615694
| 0.002012
| 0
| 0.839623
| 0
| 0.009434
| 0.171023
| 0.105751
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009434
| false
| 0
| 0.061321
| 0
| 0.070755
| 0.179245
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9bd8e21ad2dc279c477c29020446adc29cf8800f
| 2,009
|
py
|
Python
|
Note/UBX_Checksum.py
|
lsangild/ITAMS
|
d23841b09ff5eab38dd533aa2428ca9f25557d24
|
[
"MIT"
] | null | null | null |
Note/UBX_Checksum.py
|
lsangild/ITAMS
|
d23841b09ff5eab38dd533aa2428ca9f25557d24
|
[
"MIT"
] | null | null | null |
Note/UBX_Checksum.py
|
lsangild/ITAMS
|
d23841b09ff5eab38dd533aa2428ca9f25557d24
|
[
"MIT"
] | null | null | null |
def ubx_checksum(frame):
    """Return the UBX 8-bit Fletcher checksum (CK_A, CK_B) of *frame*.

    The u-blox UBX protocol computes its checksum over the message class,
    id, length and payload -- i.e. everything after the two sync bytes
    (0xB5 0x62) -- so the first two entries of *frame* are skipped, just
    as the original ``range(2, len(buffer))`` loop did.

    Args:
        frame: Sequence of byte values (ints 0-255) forming a complete
            UBX frame, starting with the two sync bytes.

    Returns:
        tuple: ``(ck_a, ck_b)``, each reduced modulo 256.
    """
    ck_a = 0
    ck_b = 0
    for value in frame[2:]:
        ck_a = 0xff & (ck_a + value)
        ck_b = 0xff & (ck_b + ck_a)
    return ck_a, ck_b


# Frame under test: UBX-ACK-ACK acknowledging class 0x06, id 0x00.
# (The previous commented-out alternative frames were removed; substitute
# any full frame -- sync bytes included -- to check its checksum.)
buffer = [0xB5, 0x62, 0x05, 0x01, 0x02, 0x00, 0x06, 0x00]
ca, cb = ubx_checksum(buffer)
print(hex(ca))
print(hex(cb))
| 91.318182
| 310
| 0.65107
| 328
| 2,009
| 3.987805
| 0.121951
| 0.825688
| 1.009174
| 1.125382
| 0.807339
| 0.772171
| 0.70948
| 0.636086
| 0.636086
| 0.607034
| 0
| 0.498499
| 0.171229
| 2,009
| 22
| 311
| 91.318182
| 0.287087
| 0.894475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
9be3f8b163f9164b3b7bac7d44357789f75021e3
| 6,716
|
py
|
Python
|
tests/integration_tests/test_drivers_license.py
|
JobtechSwe/elastic-importers
|
ae370984f79295a784350f98e695977a1f73647a
|
[
"Apache-2.0"
] | 2
|
2020-03-02T18:29:08.000Z
|
2021-06-09T00:48:24.000Z
|
tests/integration_tests/test_drivers_license.py
|
JobtechSwe/elastic-importers
|
ae370984f79295a784350f98e695977a1f73647a
|
[
"Apache-2.0"
] | 12
|
2019-01-31T09:54:23.000Z
|
2021-10-04T11:25:44.000Z
|
tests/integration_tests/test_drivers_license.py
|
JobtechSwe/elastic-importers
|
ae370984f79295a784350f98e695977a1f73647a
|
[
"Apache-2.0"
] | 1
|
2018-11-09T14:44:55.000Z
|
2018-11-09T14:44:55.000Z
|
import pytest
from importers.platsannons import converter
# Test matrix for converter.parse_driving_licence: each case maps a raw
# 'korkort' list (licence name 'namn' plus taxonomy concept id 'varde')
# to the expected enriched dicts, one per licence, order preserved.
drivers_licenses_test_data = [
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C1', 'varde': 'swP6_psb_FCB'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'D', 'varde': 'hK1a_wsQ_4UG'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'hK1a_wsQ_4UG', 'legacy_ams_taxonomy_id': '5', 'label': 'D'}]},
    {'input': {'korkort': [{'namn': 'AM', 'varde': '4HpY_e2U_TUH'}]},
     'expected': [{'concept_id': '4HpY_e2U_TUH', 'legacy_ams_taxonomy_id': '16', 'label': 'AM'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'Utökad B', 'varde': 'ftCQ_gFu_L4b'},
                           {'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'C1', 'varde': 'swP6_psb_FCB'},
                           {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'},
                  {'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
                  {'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'},
                  {'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
    {'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
     'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
                  {'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'},
                           {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
                  {'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'AM', 'varde': '4HpY_e2U_TUH'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': '4HpY_e2U_TUH', 'legacy_ams_taxonomy_id': '16', 'label': 'AM'}]},
    {'input': {'korkort': [{'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
     'expected': [{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
    {'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
     'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
                  {'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
    {'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
     'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
    {'input': {'korkort': [{'namn': 'C1', 'varde': 'swP6_psb_FCB'}]},
     'expected': [{'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'}]},
    {'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}, {'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
     'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
                  {'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'},
                           {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'},
                  {'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
    {'input': {'korkort': [{'namn': 'Utökad B', 'varde': 'ftCQ_gFu_L4b'}]},
     'expected': [{'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'}]},
    {'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
     'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
                  {'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
    {'input': {'korkort': [{'namn': 'A', 'varde': 'hK8X_cX9_5P4'}]},
     'expected': [{'concept_id': 'hK8X_cX9_5P4', 'legacy_ams_taxonomy_id': '10', 'label': 'A'}]},
    {'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}]},
     'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'}]},
    {'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'Utökad B', 'varde': 'ftCQ_gFu_L4b'},
                           {'namn': 'C1', 'varde': 'swP6_psb_FCB'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
     'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
                  {'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'},
                  {'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'},
                  {'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
]
@pytest.mark.parametrize("test_case", drivers_licenses_test_data)
def test_drivers_license(test_case):
    """parse_driving_licence enriches each korkort entry, order preserved."""
    converted = converter.parse_driving_licence(test_case['input'])
    assert converted == test_case['expected']
| 85.012658
| 113
| 0.557028
| 846
| 6,716
| 4.017731
| 0.089835
| 0.108561
| 0.20506
| 0.229185
| 0.888791
| 0.888791
| 0.883789
| 0.872315
| 0.865254
| 0.765813
| 0
| 0.024814
| 0.177933
| 6,716
| 78
| 114
| 86.102564
| 0.590835
| 0
| 0
| 0.4
| 0
| 0
| 0.523973
| 0.134306
| 0
| 0
| 0
| 0
| 0.013333
| 1
| 0.013333
| false
| 0
| 0.026667
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
501e3eb7516750df33f83d9929a307b858b4bed5
| 158
|
py
|
Python
|
jiminy/gym/envs/parameter_tuning/__init__.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | 3
|
2020-03-16T13:50:40.000Z
|
2021-06-09T05:26:13.000Z
|
jiminy/gym/envs/parameter_tuning/__init__.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | null | null | null |
jiminy/gym/envs/parameter_tuning/__init__.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | null | null | null |
from jiminy.gym.envs.parameter_tuning.convergence import ConvergenceControl
from jiminy.gym.envs.parameter_tuning.train_deep_cnn import CNNClassifierTraining
| 52.666667
| 81
| 0.898734
| 20
| 158
| 6.9
| 0.65
| 0.144928
| 0.188406
| 0.246377
| 0.463768
| 0.463768
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 158
| 2
| 82
| 79
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
acdc2e4a327a4070da7b1134a24eef4e7fc77299
| 1,905
|
py
|
Python
|
data_log/migrations/0011_auto_20190427_1159.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 66
|
2017-09-11T04:46:00.000Z
|
2021-03-13T00:02:42.000Z
|
data_log/migrations/0011_auto_20190427_1159.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 133
|
2017-09-24T21:28:59.000Z
|
2021-04-02T10:35:31.000Z
|
data_log/migrations/0011_auto_20190427_1159.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 28
|
2017-08-30T19:04:32.000Z
|
2020-11-16T04:09:00.000Z
|
# Generated by Django 2.1.7 on 2019-04-27 18:59
from django.db import migrations
class Migration(migrations.Migration):
    """Apply the shared log-ordering Meta options to every data-log model."""

    dependencies = [
        ('data_log', '0010_auto_20190420_2332'),
    ]

    # Every model receives identical options, so the ten copy-pasted
    # AlterModelOptions operations are generated in one pass instead.
    operations = [
        migrations.AlterModelOptions(
            name=model_name,
            options={'get_latest_by': 'timestamp',
                     'ordering': ('-timestamp', '-pk')},
        )
        for model_name in (
            'craftrunelog',
            'dungeonlog',
            'fulllog',
            'magicboxcraft',
            'riftdungeonlog',
            'riftraidlog',
            'shoprefreshlog',
            'summonlog',
            'wishlog',
            'worldbosslog',
        )
    ]
| 35.277778
| 86
| 0.549606
| 149
| 1,905
| 6.865772
| 0.295302
| 0.26393
| 0.30303
| 0.175953
| 0.722385
| 0.722385
| 0.722385
| 0.722385
| 0.677419
| 0.677419
| 0
| 0.022595
| 0.27979
| 1,905
| 53
| 87
| 35.943396
| 0.723032
| 0.023622
| 0
| 0.638298
| 1
| 0
| 0.306781
| 0.012379
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c57e8f8342e120782e66cbf0b96a0f52f1c5b346
| 593
|
py
|
Python
|
autonomous_tello/scripts/tello/square2.py
|
moshes7/tello
|
a313759662649e43f53d83c1cae4daa8cd86dd2d
|
[
"MIT"
] | 2
|
2019-10-28T07:18:06.000Z
|
2020-12-21T15:58:30.000Z
|
autonomous_tello/scripts/tello/square2.py
|
moshes7/tello
|
a313759662649e43f53d83c1cae4daa8cd86dd2d
|
[
"MIT"
] | null | null | null |
autonomous_tello/scripts/tello/square2.py
|
moshes7/tello
|
a313759662649e43f53d83c1cae4daa8cd86dd2d
|
[
"MIT"
] | null | null | null |
from djitellopy import Tello
import time

# flight parameters
step = 100      # length of each side of the square, in cm
sleepTime = 8   # pause after every command so the drone can settle, in s


def main():
    """Fly a Tello drone in a square: take off, fly four equal legs each
    followed by a 90-degree clockwise turn, then land and release the drone.

    The original script repeated the forward+rotate pair four times verbatim;
    the loop below issues the exact same command sequence without duplication.
    """
    tello = Tello()
    tello.connect()

    tello.takeoff()
    time.sleep(sleepTime)

    # four sides of the square: move forward, then turn 90 degrees clockwise
    for _ in range(4):
        tello.move_forward(step)
        time.sleep(sleepTime)
        tello.rotate_clockwise(90)
        time.sleep(sleepTime)

    tello.land()
    time.sleep(sleepTime)
    tello.end()


if __name__ == "__main__":
    main()
| 14.119048
| 28
| 0.790894
| 83
| 593
| 5.554217
| 0.240964
| 0.195228
| 0.390456
| 0.498915
| 0.726681
| 0.726681
| 0.726681
| 0.726681
| 0.726681
| 0.726681
| 0
| 0.022181
| 0.08769
| 593
| 42
| 29
| 14.119048
| 0.829945
| 0.016863
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5ac4cec90ee75f3a7e8965f67a23996301b8520
| 11,234
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_interface_output_10_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_interface_output_10_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_interface_output_10_expected.py
|
nielsvanhooy/genieparser
|
9a1955749697a6777ca614f0af4d5f3a2c254ccd
|
[
"Apache-2.0"
] | null | null | null |
# Golden (expected) parsed output for a `show interfaces` CLI capture covering
# five tunnel interfaces (Tunnel10, Tunnel4, Tunnel5, Tunnel6, Tunnel7).
# Used by the ShowInterfaces parser unit tests: the parser's output must match
# this dict exactly.
# NOTE: the key "out_unknown_protocl_drops" (sic) matches the parser's own
# key spelling — do not "fix" the typo here without changing the parser.
expected_output = {
    "Tunnel10": {
        "bandwidth": 100,
        "counters": {
            "in_abort": 0,
            "in_broadcast_pkts": 0,
            "in_crc_errors": 0,
            "in_errors": 0,
            "in_frame": 0,
            "in_giants": 0,
            "in_ignored": 0,
            "in_multicast_pkts": 0,
            "in_no_buffer": 0,
            "in_octets": 0,
            "in_overrun": 0,
            "in_pkts": 0,
            "in_runts": 0,
            "in_throttles": 0,
            "last_clear": "17:00:12",
            "out_broadcast_pkts": 0,
            "out_buffer_failure": 0,
            "out_buffers_swapped": 0,
            "out_collision": 0,
            "out_errors": 0,
            "out_interface_resets": 0,
            "out_multicast_pkts": 0,
            "out_octets": 0,
            "out_pkts": 0,
            "out_underruns": 0,
            "out_unknown_protocl_drops": 0,
            "rate": {
                "in_rate": 0,
                "in_rate_pkts": 0,
                "load_interval": 300,
                "out_rate": 0,
                "out_rate_pkts": 0
            },
        },
        "delay": 50000,
        "enabled": True,
        "encapsulations": {
            "encapsulation": "tunnel"
        },
        "ipv4": {
            "1.1.1.3/24": {
                "ip": "1.1.1.3", "prefix_length": "24"
            },
        },
        "last_input": "never",
        "last_output": "never",
        "line_protocol": "down",
        "mtu": 9980,
        "oper_status": "down",
        "output_hang": "never",
        "port_channel": {
            "port_channel_member": False
        },
        "queues": {
            "input_queue_drops": 0,
            "input_queue_flushes": 0,
            "input_queue_max": 375,
            "input_queue_size": 0,
            "output_queue_max": 0,
            "output_queue_size": 0,
            "queue_strategy": "fifo",
            "total_output_drop": 0
        },
        "reliability": "255/255",
        "rxload": "1/255",
        "tunnel_destination_ip": "1.1.10.11",
        "tunnel_protocol": "AURP",
        "tunnel_receive_bandwidth": 1000000,
        "tunnel_source_ip": "1.1.10.10",
        "tunnel_transmit_bandwidth": 10000000,
        "tunnel_transport_mtu": 1480,
        "tunnel_ttl": 255,
        "txload": "1/255",
        "type": "Tunnel"
    },
    "Tunnel4": {
        "bandwidth": 100,
        "counters": {
            "in_abort": 0,
            "in_broadcast_pkts": 0,
            "in_crc_errors": 0,
            "in_errors": 0,
            "in_frame": 0,
            "in_giants": 0,
            "in_ignored": 0,
            "in_multicast_pkts": 0,
            "in_no_buffer": 0,
            "in_octets": 0,
            "in_overrun": 0,
            "in_pkts": 0,
            "in_runts": 0,
            "in_throttles": 0,
            "last_clear": "00:02:56",
            "out_broadcast_pkts": 0,
            "out_buffer_failure": 0,
            "out_buffers_swapped": 0,
            "out_collision": 0,
            "out_errors": 0,
            "out_interface_resets": 0,
            "out_multicast_pkts": 0,
            "out_octets": 0,
            "out_pkts": 0,
            "out_underruns": 0,
            "out_unknown_protocl_drops": 0,
            "rate": {
                "in_rate": 0,
                "in_rate_pkts": 0,
                "load_interval": 300,
                "out_rate": 0,
                "out_rate_pkts": 0
            },
        },
        "delay": 50000,
        "enabled": True,
        "encapsulations": {
            "encapsulation": "tunnel"
        },
        "last_input": "never",
        "last_output": "never",
        "line_protocol": "down",
        "mtu": 9976,
        "oper_status": "down",
        "output_hang": "never",
        "port_channel": {
            "port_channel_member": False
        },
        "queues": {
            "input_queue_drops": 0,
            "input_queue_flushes": 0,
            "input_queue_max": 375,
            "input_queue_size": 0,
            "output_queue_max": 0,
            "output_queue_size": 0,
            "queue_strategy": "fifo",
            "total_output_drop": 0
        },
        "reliability": "255/255",
        "rxload": "1/255",
        "tunnel_protocol": "GRE/IP",
        "tunnel_receive_bandwidth": 8000,
        "tunnel_source_ip": "192.168.1.100",
        "tunnel_transmit_bandwidth": 8000,
        "tunnel_transport_mtu": 1476,
        "tunnel_ttl": 255,
        "txload": "1/255",
        "type": "Tunnel"
    },
    "Tunnel5": {
        "bandwidth": 100,
        "counters": {
            "in_abort": 0,
            "in_broadcast_pkts": 0,
            "in_crc_errors": 0,
            "in_errors": 0,
            "in_frame": 0,
            "in_giants": 0,
            "in_ignored": 0,
            "in_multicast_pkts": 0,
            "in_no_buffer": 0,
            "in_octets": 0,
            "in_overrun": 0,
            "in_pkts": 0,
            "in_runts": 0,
            "in_throttles": 0,
            "last_clear": "00:01:30",
            "out_broadcast_pkts": 0,
            "out_buffer_failure": 0,
            "out_buffers_swapped": 0,
            "out_collision": 0,
            "out_errors": 0,
            "out_interface_resets": 0,
            "out_multicast_pkts": 0,
            "out_octets": 0,
            "out_pkts": 0,
            "out_underruns": 0,
            "out_unknown_protocl_drops": 0,
            "rate": {
                "in_rate": 0,
                "in_rate_pkts": 0,
                "load_interval": 300,
                "out_rate": 0,
                "out_rate_pkts": 0
            },
        },
        "delay": 50000,
        "enabled": True,
        "encapsulations": {
            "encapsulation": "tunnel"
        },
        "last_input": "never",
        "last_output": "never",
        "line_protocol": "down",
        "mtu": 9976,
        "oper_status": "down",
        "output_hang": "never",
        "port_channel": {
            "port_channel_member": False
        },
        "queues": {
            "input_queue_drops": 0,
            "input_queue_flushes": 0,
            "input_queue_max": 375,
            "input_queue_size": 0,
            "output_queue_max": 0,
            "output_queue_size": 0,
            "queue_strategy": "fifo",
            "total_output_drop": 0
        },
        "reliability": "255/255",
        "rxload": "1/255",
        "tunnel_destination_ip": "7.7.7.8",
        "tunnel_protocol": "GRE/IP",
        "tunnel_receive_bandwidth": 8000,
        "tunnel_source_ip": "7.7.7.7",
        "tunnel_source_interface": 'Loopback100',
        "tunnel_transmit_bandwidth": 8000,
        "tunnel_transport_mtu": 1476,
        "tunnel_ttl": 255,
        "txload": "1/255",
        "type": "Tunnel"
    },
    "Tunnel6": {
        "bandwidth": 100,
        "counters": {
            "in_abort": 0,
            "in_broadcast_pkts": 0,
            "in_crc_errors": 0,
            "in_errors": 0,
            "in_frame": 0,
            "in_giants": 0,
            "in_ignored": 0,
            "in_multicast_pkts": 0,
            "in_no_buffer": 0,
            "in_octets": 0,
            "in_overrun": 0,
            "in_pkts": 0,
            "in_runts": 0,
            "in_throttles": 0,
            "last_clear": "00:00:38",
            "out_broadcast_pkts": 0,
            "out_buffer_failure": 0,
            "out_buffers_swapped": 0,
            "out_collision": 0,
            "out_errors": 0,
            "out_interface_resets": 0,
            "out_multicast_pkts": 0,
            "out_octets": 0,
            "out_pkts": 0,
            "out_underruns": 0,
            "out_unknown_protocl_drops": 0,
            "rate": {
                "in_rate": 0,
                "in_rate_pkts": 0,
                "load_interval": 300,
                "out_rate": 0,
                "out_rate_pkts": 0
            },
        },
        "delay": 50000,
        "enabled": True,
        "encapsulations": {
            "encapsulation": "tunnel"
        },
        "last_input": "never",
        "last_output": "never",
        "line_protocol": "down",
        "mtu": 9976,
        "oper_status": "down",
        "output_hang": "never",
        "port_channel": {
            "port_channel_member": False
        },
        "queues": {
            "input_queue_drops": 0,
            "input_queue_flushes": 0,
            "input_queue_max": 375,
            "input_queue_size": 0,
            "output_queue_max": 0,
            "output_queue_size": 0,
            "queue_strategy": "fifo",
            "total_output_drop": 0
        },
        "reliability": "255/255",
        "rxload": "1/255",
        "tunnel_destination_ip": "1.2.3.4",
        "tunnel_protocol": "GRE/IP",
        "tunnel_receive_bandwidth": 8000,
        "tunnel_source_ip": "UNKNOWN",
        "tunnel_transmit_bandwidth": 8000,
        "tunnel_transport_mtu": 1476,
        "tunnel_ttl": 255,
        "txload": "1/255",
        "type": "Tunnel"
    },
    "Tunnel7": {
        "bandwidth": 100,
        "counters": {
            "in_abort": 0,
            "in_broadcast_pkts": 0,
            "in_crc_errors": 0,
            "in_errors": 0,
            "in_frame": 0,
            "in_giants": 0,
            "in_ignored": 0,
            "in_multicast_pkts": 0,
            "in_no_buffer": 0,
            "in_octets": 0,
            "in_overrun": 0,
            "in_pkts": 0,
            "in_runts": 0,
            "in_throttles": 0,
            "last_clear": "00:00:45",
            "out_broadcast_pkts": 0,
            "out_buffer_failure": 0,
            "out_buffers_swapped": 0,
            "out_collision": 0,
            "out_errors": 0,
            "out_interface_resets": 0,
            "out_multicast_pkts": 0,
            "out_octets": 0,
            "out_pkts": 0,
            "out_underruns": 0,
            "out_unknown_protocl_drops": 0,
            "rate": {
                "in_rate": 0,
                "in_rate_pkts": 0,
                "load_interval": 300,
                "out_rate": 0,
                "out_rate_pkts": 0
            },
        },
        "delay": 50000,
        "enabled": True,
        "encapsulations": {
            "encapsulation": "tunnel"
        },
        "last_input": "never",
        "last_output": "never",
        "line_protocol": "down",
        "mtu": 9976,
        "oper_status": "down",
        "output_hang": "never",
        "port_channel": {
            "port_channel_member": False
        },
        "queues": {
            "input_queue_drops": 0,
            "input_queue_flushes": 0,
            "input_queue_max": 375,
            "input_queue_size": 0,
            "output_queue_max": 0,
            "output_queue_size": 0,
            "queue_strategy": "fifo",
            "total_output_drop": 0
        },
        "reliability": "255/255",
        "rxload": "1/255",
        "tunnel_protocol": "GRE/IP",
        "tunnel_receive_bandwidth": 8000,
        "tunnel_source_ip": "9.45.21.231",
        "tunnel_source_interface": 'GigabitEthernet2',
        "tunnel_transmit_bandwidth": 8000,
        "tunnel_transport_mtu": 1476,
        "tunnel_ttl": 255,
        "txload": "1/255",
        "type": "Tunnel"
    }
}
| 30.198925
| 54
| 0.44668
| 1,101
| 11,234
| 4.197094
| 0.111717
| 0.045445
| 0.022722
| 0.023804
| 0.928154
| 0.928154
| 0.928154
| 0.928154
| 0.921229
| 0.921229
| 0
| 0.077283
| 0.410272
| 11,234
| 372
| 55
| 30.198925
| 0.620226
| 0
| 0
| 0.833333
| 0
| 0
| 0.415487
| 0.042635
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a84c82f2780fa22fdd61240334d6a8a8ba177503
| 5,635
|
py
|
Python
|
models/layers/InferenceNetwork.py
|
gamerDecathlete/NormalizingFlowsNMT
|
5bff15058cad26c52002c026daa906b5aa8b894b
|
[
"MIT"
] | null | null | null |
models/layers/InferenceNetwork.py
|
gamerDecathlete/NormalizingFlowsNMT
|
5bff15058cad26c52002c026daa906b5aa8b894b
|
[
"MIT"
] | null | null | null |
models/layers/InferenceNetwork.py
|
gamerDecathlete/NormalizingFlowsNMT
|
5bff15058cad26c52002c026daa906b5aa8b894b
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch
class SourceInferenceNetwork(nn.Module):
    """Bidirectional-GRU encoder over a source sequence.

    Produces a fixed-size summary of the sequence (mean-pooled hidden states
    or the final hidden state) and maps it through ``dist_x`` to distribution
    parameters (mu, sigma).
    """

    def __init__(self, src_size, hidden_size, dist_x, use_avg=True, num_layers=1, dropout=0.):
        super(SourceInferenceNetwork, self).__init__()
        self.num_layers = num_layers
        # bidirectional GRU over the source embeddings
        self.src_rnn = nn.GRU(src_size, hidden_size, num_layers,
                              batch_first=True, bidirectional=True, dropout=dropout)
        self.use_avg = use_avg
        self.X = None
        self.dist_x = dist_x

    def meanPool(self, hidden_states, lengths):
        """Average hidden states over time, normalising by true lengths."""
        return torch.sum(hidden_states, dim=1) / lengths.float().unsqueeze(1)

    def generate_output(self, embeds, lengths, mask, network, pad_pack, ):
        """Run ``network`` over ``embeds`` and return (outputs, final state).

        With ``pad_pack`` the sequence is packed/unpacked around the RNN;
        otherwise padded positions are zeroed via ``mask`` instead.
        """
        rnn_input = pack_padded_sequence(embeds, lengths, batch_first=True) if pad_pack else embeds
        output, final = network(rnn_input)
        if not pad_pack:
            # mask out values beyond the true length of each sequence
            if len(mask.size()) < 3:
                # this ...is weird...
                mask = mask.unsqueeze(2)
            output = output * mask.float()
        else:
            output, _ = pad_packed_sequence(output, batch_first=True)
        # interleave layout: even rows are forward states, odd rows backward
        n_states = final.size(0)
        forward_states = final[0:n_states:2]
        backward_states = final[1:n_states:2]
        final = torch.cat([forward_states, backward_states], dim=2)  # [num_layers, batch, 2*dim]
        return output, final

    def forward(self, x, x_mask, x_lengths, pad_pack_x=True):
        """
        Applies a bidirectional GRU to sequence of embeddings x.
        The input mini-batch x needs to be sorted by length.
        x should have dimensions [batch, time, dim].
        """
        # NOTE: without pack_padded_sequence the final state of a padded
        # sequence keeps absorbing pad inputs and becomes uninformative,
        # so `final` is only trustworthy when pad_pack_x is True.
        x_output, x_final = self.generate_output(x, x_lengths, x_mask, self.src_rnn, pad_pack_x)
        X = self.meanPool(x_output, x_lengths) if self.use_avg else x_final.squeeze(0)
        # map the pooled representation to distribution parameters
        mu_x, sig_x = self.dist_x(X)
        return mu_x, sig_x, X
class SourceTargetInferenceNetwork(nn.Module):
    """Encodes 2 sequences and generates sets of parameters based on requested values

    Encodes a source sequence x and a target sequence y with bidirectional
    GRUs (optionally sharing parameters), pools each into a fixed-size
    vector, and produces distribution parameters both for the joint (x, y)
    representation and for x alone.
    """
    def __init__(self, src_size, trg_size, hidden_size, dist_xy, dist_x, use_avg=True, share_params=False, num_layers=1, dropout=0.):
        """
        :param src_size: source embedding dimensionality
        :param trg_size: target embedding dimensionality
        :param hidden_size: GRU hidden size (per direction)
        :param dist_xy: module mapping the joint [X;Y] vector to (mu, sigma)
        :param dist_x: module mapping the X vector to (mu, sigma)
        :param use_avg: mean-pool hidden states if True, else use final state
        :param share_params: reuse the source GRU for the target sequence
        """
        super(SourceTargetInferenceNetwork, self).__init__()
        self.num_layers = num_layers
        self.src_rnn = nn.GRU(src_size, hidden_size, num_layers,
                              batch_first=True, bidirectional=True, dropout=dropout)
        # when share_params is set the target side reuses the source GRU
        self.trg_rnn = nn.GRU(trg_size, hidden_size, num_layers,
                              batch_first=True, bidirectional=True, dropout=dropout) if not share_params else self.src_rnn
        self.use_avg = use_avg
        self.X = None
        self.Y = None
        self.dist_xy = dist_xy
        self.dist_x = dist_x

    def meanPool(self, hidden_states, lengths):
        # average over the time dimension, normalised by true sequence lengths
        return torch.sum(hidden_states,dim=1) / lengths.float().unsqueeze(1)

    def generate_output(self, embeds, lengths, mask, network, pad_pack, ):
        """Run ``network`` over ``embeds``; return (outputs, final state).

        With ``pad_pack`` the sequence is packed/unpacked around the RNN,
        otherwise padded positions of the output are zeroed with ``mask``.
        """
        if pad_pack:
            packed = pack_padded_sequence(embeds, lengths, batch_first=True)
        else:
            packed = embeds
        output, final = network(packed)
        if pad_pack:
            output, _ = pad_packed_sequence(output, batch_first=True)
        else:
            if len(mask.size()) < 3:
                #this ...is weird...
                mask = mask.unsqueeze(2)
            output = output * mask.float()
        # even rows of `final` are forward-direction states, odd rows backward
        fwd_final = final[0:final.size(0):2]
        bwd_final = final[1:final.size(0):2]
        final = torch.cat([fwd_final, bwd_final], dim=2)  # [num_layers, batch, 2*dim]
        return output, final

    def forward(self, x, x_mask, x_lengths, y, y_mask, y_lengths, pad_pack_x=True, pad_pack_y=False):
        """
        Applies a bidirectional GRU to sequence of embeddings x.
        The input mini-batch x needs to be sorted by length.
        x should have dimensions [batch, time, dim].
        """
        #mask out values beyond length of sequence
        #FYI, if you try to use final without pad_packing, you're gonna have a bad time
        #Your hidden states for padded sequences will progressively have more uninformative information
        #because w/o PackedSequence RNNS just keep feeding inputs in and if your padding is a 0 vector
        #you'll just keep dampening the hidden state
        x_output, x_final = self.generate_output(x, x_lengths, x_mask, self.src_rnn, pad_pack_x)
        y_output, y_final = self.generate_output(y, y_lengths, y_mask, self.trg_rnn, pad_pack_y)
        if self.use_avg:
            X = self.meanPool(x_output, x_lengths)
            Y = self.meanPool(y_output, y_lengths)
        else:
            X = x_final
            Y = y_final
        XY = torch.cat([X, Y], dim=1)
        # we need to manually concatenate the final states for both directions
        mu_xy, sig_xy = self.dist_xy(XY)
        mu_x, sig_x = self.dist_x(X)
        return mu_xy, sig_xy, mu_x, sig_x, XY
| 39.683099
| 133
| 0.632476
| 796
| 5,635
| 4.271357
| 0.174623
| 0.024706
| 0.028824
| 0.021176
| 0.859706
| 0.83
| 0.816471
| 0.816471
| 0.816471
| 0.801765
| 0
| 0.008389
| 0.280745
| 5,635
| 142
| 134
| 39.683099
| 0.830496
| 0.248447
| 0
| 0.707317
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.036585
| 0.02439
| 0.231707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a8cacb9aa6de56048c5521019dcc7e369bfac05d
| 61
|
py
|
Python
|
python/top_level_imports/not_how_to_do_it/util/helper.py
|
tardate/LittleCodingKata
|
25f37f2a422d1f63a7d03b25a7876d6fa707cf7a
|
[
"MIT"
] | 8
|
2017-06-02T05:12:11.000Z
|
2022-01-09T02:50:55.000Z
|
python/top_level_imports/how_to_do_it/util/helper.py
|
tardate/LittleCodingKata
|
25f37f2a422d1f63a7d03b25a7876d6fa707cf7a
|
[
"MIT"
] | 34
|
2021-03-09T00:55:40.000Z
|
2022-03-29T05:54:38.000Z
|
python/top_level_imports/how_to_do_it/util/helper.py
|
tardate/LittleCodingKata
|
25f37f2a422d1f63a7d03b25a7876d6fa707cf7a
|
[
"MIT"
] | 3
|
2016-06-15T10:13:12.000Z
|
2022-01-09T02:51:16.000Z
|
import constants
def message():
    """Return the greeting string defined by the ``constants`` module."""
    greeting = constants.HELLO
    return greeting
| 10.166667
| 26
| 0.737705
| 7
| 61
| 6.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 61
| 5
| 27
| 12.2
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
766e42e7e6196667cb12ab3f950c95f258078054
| 59,449
|
py
|
Python
|
policy/feudalRL/DIP_parametrisation.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
policy/feudalRL/DIP_parametrisation.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
policy/feudalRL/DIP_parametrisation.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
Class to convert belief states into DIP parametrisations
'''
import numpy as np
import copy
from itertools import product
from scipy.stats import entropy
from policy.Policy import Policy, Action, State, TerminalAction, TerminalState
from ontology import Ontology
from utils import Settings, ContextLogger, DialogueState
logger = ContextLogger.getLogger('')
class DIP_state(State):
    """Domain-Independent Parametrisation (DIP) of a belief state.

    Converts a full belief state into one 'general' feature vector, one
    'joint' (all-slots) vector and one vector per informable slot, then
    flattens them into a single numpy vector with per-slot boolean masks.
    """
    def __init__(self, belief, domainString=None, action_freq=None):
        """
        :param belief: full belief state (dict or DialogueState.DialogueState)
        :param domainString: domain this belief state belongs to
        :param action_freq: optional action-frequency vector prepended to
            the general features
        """
        #params
        self.domainString = domainString
        self.N_bins = 10
        self.slots = list(Ontology.global_ontology.get_informable_slots(domainString))
        if 'price' in self.slots:
            self.slots.remove('price') #remove price from SFR ont, its not used
        if 'name' in self.slots:
            self.slots.remove('name')

        self.DIP_state = {'general':None, 'joint':None}
        for slot in self.slots:
            self.DIP_state[slot]=None

        # convert belief state into DIP params
        if action_freq is not None:
            self.DIP_state['general'] = np.concatenate((action_freq,self.convert_general_b(belief)))
        else:
            self.DIP_state['general'] = self.convert_general_b(belief)
        self.DIP_state['joint'] = self.convert_joint_slot_b(belief)
        for slot in self.slots:
            self.DIP_state[slot] = self.convert_slot_b(belief, slot)

        # create DIP vector and masks
        self.get_DIP_vector()
        self.beliefStateVec = None #for compatibility with GP sarsa implementation

    def get_DIP_vector(self):
        """
        convert the DIP state into a numpy vector and a set of masks per slot
        :return:
        """
        # pad vector: a slot-sized slice with a leading 1 marking "no slot"
        pad_v = np.zeros(len(self.DIP_state[self.slots[0]]))
        slot_len = len(pad_v)
        general_len = len(self.DIP_state['general']) + len(self.DIP_state['joint'])
        pad_v[0] = 1.
        self.DIP_vector = [pad_v]
        self.DIP_masks = {}
        # general+joint features (the tail of the vector) are visible to every mask
        mask_template = [False] * (slot_len * (len(self.slots) + 1)) + [True] * general_len
        i = 1
        for slot in self.slots:
            self.DIP_vector.append(self.DIP_state[slot])
            self.DIP_masks[slot] = np.array(mask_template)
            # each slot mask additionally exposes that slot's own slice
            self.DIP_masks[slot][slot_len*i:slot_len*(i+1)] = True
            i += 1
        self.DIP_vector.append(self.DIP_state['general'])
        self.DIP_vector.append(self.DIP_state['joint'])
        self.DIP_masks['general'] = np.array(mask_template)
        # the 'general' mask exposes the pad slice instead of a slot slice
        self.DIP_masks['general'][:slot_len] = True
        self.DIP_vector = np.concatenate(self.DIP_vector)

    def get_beliefStateVec(self, slot):
        """Return the flat DIP vector masked for ``slot`` (or 'general')."""
        return self.DIP_vector[self.DIP_masks[slot]]

    def get_DIP_state(self, slot):
        """Return general + joint + per-slot features as one (1, n) array."""
        return np.array([self.DIP_state['general'] + self.DIP_state['joint'] + self.DIP_state[slot]])

    def get_full_DIP_state(self):
        """Return all slot features plus general+joint features, with a mask
        that selects only the general+joint part."""
        full_slot_bstate = []
        for slot in self.slots:
            full_slot_bstate += self.DIP_state[slot]
        full_DIP_state = np.array([full_slot_bstate + self.DIP_state['general'] + self.DIP_state['joint']])
        DIP_mask = [True]*(len(self.DIP_state['general']) + len(self.DIP_state['joint'])) + [False] * len(full_slot_bstate)
        return full_DIP_state, DIP_mask

    def convert_general_b(self, belief):
        """
        Extracts from the belief state the DIP vector corresponding to the general features (e.g. method, user act...)
        :param belief: The full belief state
        :return: The DIP general vector
        """
        if type(belief) == DialogueState.DialogueState:
            belief = belief.domainStates[belief.currentdomain]

        dial_act = list(belief['beliefs']['discourseAct'].values())
        requested = self._get_DIP_requested_vector(belief)
        method = list(belief['beliefs']['method'].values())
        features = [int(belief['features']['offerHappened']), int(belief['features']['lastActionInformNone']), int(bool(belief['features']['lastInformedVenue']))]
        discriminable = [int(x) for x in belief['features']['inform_info']]
        # domain-size features: inverse slot count and inverse mean value count
        slot_n = 1/len(self.slots)
        val_n = []
        for slot in self.slots:
            val_n.append(len(Ontology.global_ontology.get_informable_slot_values(self.domainString, slot)))
        avg_value_n = 1/np.mean(val_n)

        return dial_act + requested + method + features + discriminable + [slot_n, avg_value_n]

    def _get_DIP_requested_vector(self, belief):
        """One-hot (length 5) of the number of requested slots, capped at 4."""
        n_requested = sum([x>0.5 for x in list(belief['beliefs']['requested'].values())])
        ret_vec = [0] * 5
        if n_requested > 4:
            n_requested = 4
        ret_vec[n_requested] = 1.
        return ret_vec

    def convert_joint_slot_b(self, belief):
        """
        Extracts the features for the joint DIP vector for all the slots
        :param belief: The full belief state
        :return: The DIP joint slot vector
        """
        if type(belief) == DialogueState.DialogueState:
            belief = belief.domainStates[belief.currentdomain]

        joint_beliefs = []
        joint_none = 1.
        informable_beliefs = [copy.deepcopy(belief['beliefs'][x]) for x in list(belief['beliefs'].keys()) if x in self.slots] # this might be inneficent
        for i, b in enumerate(informable_beliefs):
            joint_none *= b['**NONE**']
            del b['**NONE**'] # should I put **NONE** prob mass to dontcare?
            # keep the two largest non-zero probabilities per slot
            informable_beliefs[i] = sorted([x for x in list(b.values()) if x != 0], reverse=True)[:2]
            while len(informable_beliefs[i]) < 2:
                informable_beliefs[i].append(0.)
        # joint beliefs = products over the cartesian product of top-2 values
        for probs in product(*informable_beliefs):
            joint_beliefs.append(np.prod(probs))
        j_top = joint_beliefs[0]
        j_2nd = joint_beliefs[1]
        j_3rd = joint_beliefs[2]
        first_joint_beliefs = joint_beliefs[:8]
        if sum(first_joint_beliefs) == 0:
            # degenerate case: uniform distribution when all mass is zero
            first_joint_beliefs = np.ones(len(first_joint_beliefs)) / len(first_joint_beliefs)
        else:
            first_joint_beliefs = np.array(first_joint_beliefs) / sum(first_joint_beliefs) # why normalise?

        # difference between 1st and 2dn values
        j_ent = entropy(first_joint_beliefs)
        j_dif = joint_beliefs[0] - joint_beliefs[1]
        j_dif_bin = [0.] * 5
        idx = int((j_dif) * 5)
        if idx == 5:
            idx = 4
        j_dif_bin[idx] = 1

        # number of slots which are not **NONE**
        n = 0
        for key in belief['beliefs']:
            if key in self.slots:
                none_val = belief['beliefs'][key]['**NONE**']
                top_val = np.max([belief['beliefs'][key][value] for value in list(belief['beliefs'][key].keys()) if value != '**NONE**'])
                if top_val > none_val:
                    n += 1
        not_none = [0.] * 5
        if n > 4:
            n = 4
        not_none[n] = 1.

        return [j_top, j_2nd, j_3rd, joint_none, j_ent, j_dif] + j_dif_bin + not_none

    def convert_slot_b(self, belief, slot):
        """
        Extracts the slot DIP features.
        :param belief: The full belief state
        :return: The slot DIP vector
        """
        if type(belief) == DialogueState.DialogueState:
            belief = belief.domainStates[belief.currentdomain]
        # b[0] = **NONE** prob, b[1:] = value probs sorted descending
        b = [belief['beliefs'][slot]['**NONE**']] + sorted([belief['beliefs'][slot][value] for value in list(belief['beliefs'][slot].keys()) if value != '**NONE**'], reverse=True)
        b_top = b[1]
        b_2nd = b[2]
        b_3rd = b[3]
        b_ent = entropy(b)
        b_none = b[0]
        b_dif = b[1] - b[2]
        b_dif_bin = [0.] * 5
        idx = int((b_dif) * 5)
        if idx == 5:
            idx = 4
        b_dif_bin[idx] = 1
        non_zero_rate = [x != 0 for x in b[1:]]
        non_zero_rate = sum(non_zero_rate) / len(non_zero_rate)
        requested_prob = belief['beliefs']['requested'][slot]

        # Ontology and DB based features
        V_len = len(Ontology.global_ontology.get_informable_slot_values(self.domainString, slot))
        norm_N_values = 1 / V_len
        # one-hot of log2(|V_s|) over N_bins bins
        v_len_bin_vector = [0.] * self.N_bins
        v_len_bin_vector[int(np.log2(V_len))] = 1.
        #ocurr_prob, not_occur_prob, first_prob, second_prob, later_prob = self._get_importance_and_priority(slot) # this was manually set in the original DIP paper, I think it can be learned from the other features
        val_dist_in_DB = self._get_val_dist_in_DB(slot)
        # potential_contr_to_DB_search = self._get_potential_contr_to_DB_search(slot, belief)
        #potential_contr_to_DB_search = [0, 0, 0, 0] # the implementation of this method is too slow right now, dont knwo how useful these features are (but they seem quite useful)
        return [0, b_top, b_2nd, b_3rd, b_ent, b_none, non_zero_rate, requested_prob, norm_N_values, val_dist_in_DB] + b_dif_bin + v_len_bin_vector

    def _get_val_dist_in_DB(self, slot):
        # The entropy of the normalised histogram (|DB(s=v)|/|DB|) \forall v \in V_s
        # NOTE(review): if no entity has a value for this slot, n stays 0 and
        # val_dist/n raises ZeroDivisionError — presumably never happens for
        # the ontologies used; confirm.
        values = Ontology.global_ontology.get_informable_slot_values(self.domainString, slot)
        entities = Ontology.global_ontology.entity_by_features(self.domainString, {})
        val_dist = np.zeros(len(values))
        n = 0
        for ent in entities:
            if ent[slot] != 'not available':
                val_dist[values.index(ent[slot])] += 1
                n += 1
        return entropy(val_dist/n)
class padded_state(State):
def __init__(self, belief, domainString=None, action_freq=None):
#params
self.domainString = domainString
self.sortbelief = False
#self.action_freq = False
if Settings.config.has_option('feudalpolicy', 'sortbelief'):
self.sortbelief = Settings.config.getboolean('feudalpolicy', 'sortbelief')
#if Settings.config.has_option('feudalpolicy', 'action_freq'):
# self.action_freq = Settings.config.getboolean('feudalpolicy', 'action_freq')
self.slots = list(Ontology.global_ontology.get_informable_slots(domainString))
if 'price' in self.slots:
self.slots.remove('price') #remove price from SFR ont, its not used
if 'name' in self.slots:
self.slots.remove('name')
slot_values = Ontology.global_ontology.get_informable_slots_and_values(domainString)
self.max_v = np.max([len(slot_values[s]) for s in self.slots]) + 3 # (+**NONE**+dontcare+pad)
self.max_v = 158
self.si_size = 72 # size of general plus joint vectors
self.sd_size = self.max_v
self.DIP_state = {'general':None, 'joint':None}
for slot in self.slots:
self.DIP_state[slot]=None
# convert belief state into DIP params
if action_freq is not None:
self.DIP_state['general'] = np.concatenate((action_freq,self.convert_general_b(belief)))
else:
self.DIP_state['general'] = self.convert_general_b(belief)
self.DIP_state['joint'] = self.convert_joint_slot_b(belief)
for slot in self.slots:
self.DIP_state[slot] = self.convert_slot_b(belief, slot)
# create vector and masks
self.get_DIP_vector()
self.beliefStateVec = None #for compatibility with GP sarsa implementation
def get_DIP_vector(self):
"""
convert the state into a numpy vector and a set of masks per slot
:return:
"""
pad_v = np.zeros(len(self.DIP_state[self.slots[0]]))
slot_len = len(pad_v)
general_len = len(self.DIP_state['general']) + len(self.DIP_state['joint'])
pad_v[0] = 1.
self.DIP_vector = [pad_v]
self.DIP_masks = {}
mask_template = [False] * (slot_len * (len(self.slots) + 1)) + [True] * general_len
i = 1
for slot in self.slots:
self.DIP_vector.append(self.DIP_state[slot])
self.DIP_masks[slot] = np.array(mask_template)
self.DIP_masks[slot][slot_len*i:slot_len*(i+1)] = True
i += 1
self.DIP_vector.append(self.DIP_state['general'])
self.DIP_vector.append(self.DIP_state['joint'])
self.DIP_masks['general'] = np.array(mask_template)
self.DIP_masks['general'][:slot_len] = True
self.DIP_vector = np.concatenate(self.DIP_vector)
def get_beliefStateVec(self, slot):
return self.DIP_vector[self.DIP_masks[slot]]
def get_DIP_state(self, slot):
return np.array([self.DIP_state['general'] + self.DIP_state['joint'] + self.DIP_state[slot]])
def get_full_DIP_state(self):
full_slot_bstate = []
for slot in self.slots:
full_slot_bstate += self.DIP_state[slot]
full_DIP_state = np.array([full_slot_bstate + self.DIP_state['general'] + self.DIP_state['joint']])
DIP_mask = [True]*(len(self.DIP_state['general']) + len(self.DIP_state['joint'])) + [False] * len(full_slot_bstate)
return full_DIP_state, DIP_mask
def convert_general_b(self, belief):
"""
Extracts from the belief state the vector corresponding to the general features (e.g. method, user act...)
:param belief: The full belief state
:return: The general vector
"""
if type(belief) == DialogueState.DialogueState:
belief = belief.domainStates[belief.currentdomain]
dial_act = list(belief['beliefs']['discourseAct'].values())
requested = self._get_requested_vector(belief)
method = list(belief['beliefs']['method'].values())
features = [int(belief['features']['offerHappened']), int(belief['features']['lastActionInformNone']),
int(bool(belief['features']['lastInformedVenue']))]
discriminable = [int(x) for x in belief['features']['inform_info']]
return dial_act + requested + method + features + discriminable
def _get_requested_vector(self, belief):
n_requested = sum([x>0.5 for x in list(belief['beliefs']['requested'].values())])
ret_vec = [0] * 5
if n_requested > 4:
n_requested = 4
ret_vec[n_requested] = 1.
return ret_vec
def convert_joint_slot_b(self, belief):
"""
Extracts the features for the joint vector of all the slots
:param belief: The full belief state
:return: The joint slot vector
"""
#ic340 note: this should probably be done with an rnn encoder
if type(belief) == DialogueState.DialogueState:
belief = belief.domainStates[belief.currentdomain]
joint_beliefs = []
joint_none = 1.
informable_beliefs = [copy.deepcopy(belief['beliefs'][x]) for x in list(belief['beliefs'].keys()) if
x in self.slots] # this might be inneficent
for i, b in enumerate(informable_beliefs):
joint_none *= b['**NONE**']
del b['**NONE**'] # should I put **NONE** prob mass to dontcare?
informable_beliefs[i] = sorted([x for x in list(b.values()) if x != 0], reverse=True)[:2]
while len(informable_beliefs[i]) < 2:
informable_beliefs[i].append(0.)
for probs in product(*informable_beliefs):
joint_beliefs.append(np.prod(probs))
first_joint_beliefs = -np.ones(20)
joint_beliefs = joint_beliefs[:20]
len_joint_beliefs = len(joint_beliefs)
first_joint_beliefs[:len_joint_beliefs] = joint_beliefs
if sum(first_joint_beliefs) == 0:
first_joint_beliefs = list(np.ones(len(first_joint_beliefs)) / len(first_joint_beliefs))
else:
first_joint_beliefs = list(np.array(first_joint_beliefs) / sum(first_joint_beliefs)) # why normalise?
# number of slots which are not **NONE**
n = 0
for key in belief['beliefs']:
if key in self.slots:
none_val = belief['beliefs'][key]['**NONE**']
top_val = np.max(
[belief['beliefs'][key][value] for value in list(belief['beliefs'][key].keys()) if value != '**NONE**'])
if top_val > none_val:
n += 1
not_none = [0.] * 5
if n > 4:
n = 4
not_none[n] = 1.
return [joint_none] + first_joint_beliefs + not_none
def convert_slot_b(self, belief, slot):
"""
Extracts the slot features by padding the distribution vector with -1s.
:param belief: The full belief state
:return: The slot DIP vector
"""
if type(belief) == DialogueState.DialogueState:
belief = belief.domainStates[belief.currentdomain]
if self.sortbelief is True:
b = [belief['beliefs'][slot]['**NONE**']] + sorted(
[belief['beliefs'][slot][value] for value in list(belief['beliefs'][slot].keys()) if value != '**NONE**'],
reverse=True) # sorted values
else:
b = [belief['beliefs'][slot]['**NONE**']] + \
[belief['beliefs'][slot][value] for value in list(belief['beliefs'][slot].keys()) if value != '**NONE**'] # unsorted values
assert len(b) <= self.max_v -1, 'length of bstate ({}) is longer than self.max_v ({})'.format(len(b), self.max_v-1)
padded_b = -np.ones(self.max_v)
padded_b[0] = 0.
padded_b[1:len(b)+1] = b
return padded_b
def _get_val_dist_in_DB(self, slot):
    """
    Return the entropy of the value distribution of *slot* over the database.

    Builds the histogram |DB(s=v)| for every informable value v of the slot
    (skipping entities where the slot is 'not available'), normalises it by
    the number of counted entities and returns its entropy.

    :param slot: name of the informable slot
    :return: entropy of the normalised value histogram
    """
    slot_values = Ontology.global_ontology.get_informable_slot_values(self.domainString, slot)
    all_entities = Ontology.global_ontology.entity_by_features(self.domainString, {})

    histogram = np.zeros(len(slot_values))
    counted = 0
    for entity in all_entities:
        entity_value = entity[slot]
        if entity_value == 'not available':
            continue
        histogram[slot_values.index(entity_value)] += 1
        counted += 1
    return entropy(histogram / counted)
def get_test_beliefs():
b1 = {'beliefs': {'allowedforkids': {'**NONE**': 0.0,
'0': 0.0,
'1': 0.0,
'dontcare': 1.0},
'area': {'**NONE**': 1.0,
'alamo square': 0.0,
'amanico ergina village': 0.0,
'anza vista': 0.0,
'ashbury heights': 0.0,
'balboa terrace': 0.0,
'bayview district': 0.0,
'bayview heights': 0.0,
'bernal heights': 0.0,
'bernal heights north': 0.0,
'bernal heights south': 0.0,
'buena vista park': 0.0,
'castro': 0.0,
'cathedral hill': 0.0,
'cayuga terrace': 0.0,
'central richmond': 0.0,
'central sunset': 0.0,
'central waterfront': 0.0,
'chinatown': 0.0,
'civic center': 0.0,
'clarendon heights': 0.0,
'cole valley': 0.0,
'corona heights': 0.0,
'cow hollow': 0.0,
'crocker amazon': 0.0,
'diamond heights': 0.0,
'doelger city': 0.0,
'dogpatch': 0.0,
'dolores heights': 0.0,
'dontcare': 0.0,
'downtown': 0.0,
'duboce triangle': 0.0,
'embarcadero': 0.0,
'eureka valley': 0.0,
'eureka valley dolores heights': 0.0,
'excelsior': 0.0,
'financial district': 0.0,
'financial district south': 0.0,
'fishermans wharf': 0.0,
'forest hill': 0.0,
'forest hill extension': 0.0,
'forest knolls': 0.0,
'fort mason': 0.0,
'fort winfield scott': 0.0,
'frederick douglass haynes gardens': 0.0,
'friendship village': 0.0,
'glen park': 0.0,
'glenridge': 0.0,
'golden gate heights': 0.0,
'golden gate park': 0.0,
'haight ashbury': 0.0,
'hayes valley': 0.0,
'hunters point': 0.0,
'india basin': 0.0,
'ingleside': 0.0,
'ingleside heights': 0.0,
'ingleside terrace': 0.0,
'inner mission': 0.0,
'inner parkside': 0.0,
'inner richmond': 0.0,
'inner sunset': 0.0,
'inset': 0.0,
'jordan park': 0.0,
'laguna honda': 0.0,
'lake': 0.0,
'lake shore': 0.0,
'lakeside': 0.0,
'laurel heights': 0.0,
'lincoln park': 0.0,
'lincoln park lobos': 0.0,
'little hollywood': 0.0,
'little italy': 0.0,
'little osaka': 0.0,
'little russia': 0.0,
'lone mountain': 0.0,
'lower haight': 0.0,
'lower nob hill': 0.0,
'lower pacific heights': 0.0,
'malcolm x square': 0.0,
'marcus garvey square': 0.0,
'marina district': 0.0,
'martin luther king square': 0.0,
'mastro': 0.0,
'merced heights': 0.0,
'merced manor': 0.0,
'midtown terrace': 0.0,
'miraloma park': 0.0,
'mission bay': 0.0,
'mission district': 0.0,
'mission dolores': 0.0,
'mission terrace': 0.0,
'monterey heights': 0.0,
'mount davidson manor': 0.0,
'nob hill': 0.0,
'noe valley': 0.0,
'noma': 0.0,
'north beach': 0.0,
'north panhandle': 0.0,
'north park': 0.0,
'north waterfront': 0.0,
'oceanview': 0.0,
'opera plaza': 0.0,
'outer mission': 0.0,
'outer parkside': 0.0,
'outer richmond': 0.0,
'outer sunset': 0.0,
'outset': 0.0,
'pacific heights': 0.0,
'panhandle': 0.0,
'park merced': 0.0,
'parkmerced': 0.0,
'parkside': 0.0,
'pine lake park': 0.0,
'portola': 0.0,
'potrero flats': 0.0,
'potrero hill': 0.0,
'presidio': 0.0,
'presidio heights': 0.0,
'richmond district': 0.0,
'russian hill': 0.0,
'saint francis wood': 0.0,
'san francisco airport': 0.0,
'san francisco state university': 0.0,
'sea cliff': 0.0,
'sherwood forest': 0.0,
'showplace square': 0.0,
'silver terrace': 0.0,
'somisspo': 0.0,
'south basin': 0.0,
'south beach': 0.0,
'south of market': 0.0,
'st francis square': 0.0,
'st francis wood': 0.0,
'stonestown': 0.0,
'sunnydale': 0.0,
'sunnyside': 0.0,
'sunset district': 0.0,
'telegraph hill': 0.0,
'tenderloin': 0.0,
'thomas paine square': 0.0,
'transmission': 0.0,
'treasure island': 0.0,
'twin peaks': 0.0,
'twin peaks west': 0.0,
'upper market': 0.0,
'van ness': 0.0,
'victoria mews': 0.0,
'visitacion valley': 0.0,
'vista del monte': 0.0,
'west of twin peaks': 0.0,
'west portal': 0.0,
'western addition': 0.0,
'westlake and olympic': 0.0,
'westwood highlands': 0.0,
'westwood park': 0.0,
'yerba buena island': 0.0,
'zion district': 0.0},
'discourseAct': {'ack': 0.0,
'bye': 0.0,
'hello': 0.0,
'none': 1.0,
'repeat': 0.0,
'silence': 0.0,
'thankyou': 0.0},
'food': {'**NONE**': 0.0,
'afghan': 0.0,
'arabian': 0.0,
'asian': 0.0,
'basque': 0.0,
'brasseries': 0.0,
'brazilian': 0.0,
'buffets': 0.0,
'burgers': 0.0,
'burmese': 0.0,
'cafes': 0.0,
'cambodian': 0.0,
'cantonese': 1.0,
'chinese': 0.0,
'comfort food': 0.0,
'creperies': 0.0,
'dim sum': 0.0,
'dontcare': 0.0,
'ethiopian': 0.0,
'ethnic food': 0.0,
'french': 0.0,
'gluten free': 0.0,
'himalayan': 0.0,
'indian': 0.0,
'indonesian': 0.0,
'indpak': 0.0,
'italian': 0.0,
'japanese': 0.0,
'korean': 0.0,
'kosher': 0.0,
'latin': 0.0,
'lebanese': 0.0,
'lounges': 0.0,
'malaysian': 0.0,
'mediterranean': 0.0,
'mexican': 0.0,
'middle eastern': 0.0,
'modern european': 0.0,
'moroccan': 0.0,
'new american': 0.0,
'pakistani': 0.0,
'persian': 0.0,
'peruvian': 0.0,
'pizza': 0.0,
'raw food': 0.0,
'russian': 0.0,
'sandwiches': 0.0,
'sea food': 0.0,
'shanghainese': 0.0,
'singaporean': 0.0,
'soul food': 0.0,
'spanish': 0.0,
'steak': 0.0,
'sushi': 0.0,
'taiwanese': 0.0,
'tapas': 0.0,
'thai': 0.0,
'traditionnal american': 0.0,
'turkish': 0.0,
'vegetarian': 0.0,
'vietnamese': 0.0},
'goodformeal': {'**NONE**': 0.0,
'breakfast': 0.0,
'brunch': 0.0,
'dinner': 0.0,
'dontcare': 1.0,
'lunch': 0.0},
'method': {'byalternatives': 0.0,
'byconstraints': 0.0,
'byname': 0.9285714285714286,
'finished': 0.0,
'none': 0.0714285714285714,
'restart': 0.0},
'name': {'**NONE**': 0.0,
'a 16': 0.0,
'a la turca restaurant': 0.0,
'abacus': 0.0,
'alamo square seafood grill': 0.0,
'albona ristorante istriano': 0.0,
'alborz persian cuisine': 0.0,
'allegro romano': 0.0,
'amarena': 0.0,
'amber india': 0.0,
'ame': 0.0,
'ananda fuara': 0.0,
'anchor oyster bar': 0.0,
'angkor borei restaurant': 0.0,
'aperto restaurant': 0.0,
'ar roi restaurant': 0.0,
'arabian nights restaurant': 0.0,
'assab eritrean restaurant': 0.0,
'atelier crenn': 0.0,
'aux delices restaurant': 0.0,
'aziza': 0.0,
'b star bar': 0.0,
'bar crudo': 0.0,
'beijing restaurant': 0.0,
'bella trattoria': 0.0,
'benu': 0.0,
'betelnut': 0.0,
'bistro central parc': 0.0,
'bix': 0.0,
'borgo': 0.0,
'borobudur restaurant': 0.0,
'bouche': 0.0,
'boulevard': 0.0,
'brothers restaurant': 0.0,
'bund shanghai restaurant': 0.0,
'burma superstar': 0.0,
'butterfly': 0.0,
'cafe claude': 0.0,
'cafe jacqueline': 0.0,
'campton place restaurant': 0.0,
'canteen': 0.0,
'canto do brasil restaurant': 0.0,
'capannina': 0.0,
'capital restaurant': 0.0,
'chai yo thai restaurant': 0.0,
'chaya brasserie': 0.0,
'chenery park': 0.0,
'chez maman': 0.0,
'chez papa bistrot': 0.0,
'chez spencer': 0.0,
'chiaroscuro': 0.0,
'chouchou': 0.0,
'chow': 0.0,
'city view restaurant': 0.0,
'claudine': 0.0,
'coi': 0.0,
'colibri mexican bistro': 0.0,
'coqueta': 0.0,
'crustacean restaurant': 0.0,
'da flora a venetian osteria': 0.0,
'darbar restaurant': 0.0,
'delancey street restaurant': 0.0,
'delfina': 0.0,
'dong baek restaurant': 0.0,
'dontcare': 0.0,
'dosa on fillmore': 0.0,
'dosa on valencia': 0.0,
'eiji': 0.0,
'enjoy vegetarian restaurant': 0.0,
'espetus churrascaria': 0.0,
'fang': 0.0,
'farallon': 0.0,
'fattoush restaurant': 0.0,
'fifth floor': 0.0,
'fino restaurant': 0.0,
'firefly': 0.0,
'firenze by night ristorante': 0.0,
'fleur de lys': 0.0,
'fog harbor fish house': 0.0,
'forbes island': 0.0,
'foreign cinema': 0.0,
'frances': 0.0,
'franchino': 0.0,
'franciscan crab restaurant': 0.0,
'frascati': 0.0,
'fresca': 0.0,
'fringale': 0.0,
'fujiyama ya japanese restaurant': 0.0,
'gajalee': 0.0,
'gamine': 0.0,
'garcon restaurant': 0.0,
'gary danko': 0.0,
'gitane': 0.0,
'golden era restaurant': 0.0,
'gracias madre': 0.0,
'great eastern restaurant': 1.0,
'hakka restaurant': 0.0,
'hakkasan': 0.0,
'han second kwan': 0.0,
'heirloom cafe': 0.0,
'helmand palace': 0.0,
'hi dive': 0.0,
'hillside supper club': 0.0,
'hillstone': 0.0,
'hong kong clay pot restaurant': 0.0,
'house of nanking': 0.0,
'house of prime rib': 0.0,
'hunan homes restaurant': 0.0,
'incanto': 0.0,
'isa': 0.0,
'jannah': 0.0,
'jasmine garden': 0.0,
'jitlada thai cuisine': 0.0,
'kappa japanese restaurant': 0.0,
'kim thanh restaurant': 0.0,
'kirin chinese restaurant': 0.0,
'kiss seafood': 0.0,
'kokkari estiatorio': 0.0,
'la briciola': 0.0,
'la ciccia': 0.0,
'la folie': 0.0,
'la mediterranee': 0.0,
'la traviata': 0.0,
'lahore karahi': 0.0,
'lavash': 0.0,
'le charm': 0.0,
'le colonial': 0.0,
'le soleil': 0.0,
'lime tree southeast asian kitchen': 0.0,
'little delhi': 0.0,
'little nepal': 0.0,
'luce': 0.0,
'lucky creation restaurant': 0.0,
'luella': 0.0,
'lupa': 0.0,
'm y china': 0.0,
'maki restaurant': 0.0,
'mangia tutti ristorante': 0.0,
'manna': 0.0,
'marlowe': 0.0,
'marnee thai': 0.0,
'maverick': 0.0,
'mela tandoori kitchen': 0.0,
'mescolanza': 0.0,
'mezes': 0.0,
'michael mina restaurant': 0.0,
'millennium': 0.0,
'minako organic japanese restaurant': 0.0,
'minami restaurant': 0.0,
'mission chinese food': 0.0,
'mochica': 0.0,
'modern thai': 0.0,
'mona lisa restaurant': 0.0,
'mozzeria': 0.0,
'muguboka restaurant': 0.0,
'my tofu house': 0.0,
'nicaragua restaurant': 0.0,
'nob hill cafe': 0.0,
'nopa': 0.0,
'old jerusalem restaurant': 0.0,
'old skool cafe': 0.0,
'one market restaurant': 0.0,
'orexi': 0.0,
'original us restaurant': 0.0,
'osha thai': 0.0,
'oyaji restaurant': 0.0,
'ozumo': 0.0,
'pad thai restaurant': 0.0,
'panta rei restaurant': 0.0,
'park tavern': 0.0,
'pera': 0.0,
'piperade': 0.0,
'ploy 2': 0.0,
'poc chuc': 0.0,
'poesia': 0.0,
'prospect': 0.0,
'quince': 0.0,
'radius san francisco': 0.0,
'range': 0.0,
'red door cafe': 0.0,
'restaurant ducroix': 0.0,
'ristorante bacco': 0.0,
'ristorante ideale': 0.0,
'ristorante milano': 0.0,
'ristorante parma': 0.0,
'rn74': 0.0,
'rue lepic': 0.0,
'saha': 0.0,
'sai jai thai restaurant': 0.0,
'salt house': 0.0,
'san tung chinese restaurant': 0.0,
'san wang restaurant': 0.0,
'sanjalisco': 0.0,
'sanraku': 0.0,
'seasons': 0.0,
'seoul garden': 0.0,
'seven hills': 0.0,
'shangri la vegetarian restaurant': 0.0,
'singapore malaysian restaurant': 0.0,
'skool': 0.0,
'so': 0.0,
'sotto mare': 0.0,
'source': 0.0,
'specchio ristorante': 0.0,
'spruce': 0.0,
'straits restaurant': 0.0,
'stroganoff restaurant': 0.0,
'sunflower potrero hill': 0.0,
'sushi bistro': 0.0,
'taiwan restaurant': 0.0,
'tanuki restaurant': 0.0,
'tataki': 0.0,
'tekka japanese restaurant': 0.0,
'thai cottage restaurant': 0.0,
'thai house express': 0.0,
'thai idea vegetarian': 0.0,
'thai time restaurant': 0.0,
'thanh long': 0.0,
'the big 4 restaurant': 0.0,
'the blue plate': 0.0,
'the house': 0.0,
'the richmond': 0.0,
'the slanted door': 0.0,
'the stinking rose': 0.0,
'thep phanom thai restaurant': 0.0,
'tommys joynt': 0.0,
'toraya japanese restaurant': 0.0,
'town hall': 0.0,
'trattoria contadina': 0.0,
'tu lan': 0.0,
'tuba restaurant': 0.0,
'u lee restaurant': 0.0,
'udupi palace': 0.0,
'venticello ristorante': 0.0,
'vicoletto': 0.0,
'yank sing': 0.0,
'yummy yummy': 0.0,
'z and y restaurant': 0.0,
'zadin': 0.0,
'zare at fly trap': 0.0,
'zarzuela': 0.0,
'zen yai thai restaurant': 0.0,
'zuni cafe': 0.0,
'zushi puzzle': 0.0},
'near': {'**NONE**': 0.0,
'bayview hunters point': 0.0,
'dontcare': 1.0,
'haight': 0.0,
'japantown': 0.0,
'marina cow hollow': 0.0,
'mission': 0.0,
'nopa': 0.0,
'north beach telegraph hill': 0.0,
'soma': 0.0,
'union square': 0.0},
'price': {'**NONE**': 1.0,
'10 dollar': 0.0,
'10 euro': 0.0,
'11 euro': 0.0,
'15 euro': 0.0,
'18 euro': 0.0,
'20 euro': 0.0,
'22 euro': 0.0,
'25 euro': 0.0,
'26 euro': 0.0,
'29 euro': 0.0,
'37 euro': 0.0,
'6': 0.0,
'7': 0.0,
'9': 0.0,
'between 0 and 15 euro': 0.0,
'between 10 and 13 euro': 0.0,
'between 10 and 15 euro': 0.0,
'between 10 and 18 euro': 0.0,
'between 10 and 20 euro': 0.0,
'between 10 and 23 euro': 0.0,
'between 10 and 30 euro': 0.0,
'between 11 and 15 euro': 0.0,
'between 11 and 18 euro': 0.0,
'between 11 and 22 euro': 0.0,
'between 11 and 25 euro': 0.0,
'between 11 and 29 euro': 0.0,
'between 11 and 35 euro': 0.0,
'between 13 and 15 euro': 0.0,
'between 13 and 18 euro': 0.0,
'between 13 and 24 euro': 0.0,
'between 15 and 18 euro': 0.0,
'between 15 and 22 euro': 0.0,
'between 15 and 26 euro': 0.0,
'between 15 and 29 euro': 0.0,
'between 15 and 33 euro': 0.0,
'between 15 and 44 euro': 0.0,
'between 15 and 58 euro': 0.0,
'between 18 and 26 euro': 0.0,
'between 18 and 29 euro': 0.0,
'between 18 and 44 euro': 0.0,
'between 18 and 55 euro': 0.0,
'between 18 and 58 euro': 0.0,
'between 18 and 73 euro': 0.0,
'between 18 and 78 euro': 0.0,
'between 2 and 15 euro': 0.0,
'between 20 and 30 euro': 0.0,
'between 21 and 23 euro': 0.0,
'between 22 and 29 euro': 0.0,
'between 22 and 30 dollar': 0.0,
'between 22 and 37 euro': 0.0,
'between 22 and 58 euro': 0.0,
'between 22 and 73 euro': 0.0,
'between 23 and 29': 0.0,
'between 23 and 29 euro': 0.0,
'between 23 and 37 euro': 0.0,
'between 23 and 58': 0.0,
'between 23 and 58 euro': 0.0,
'between 26 and 33 euro': 0.0,
'between 26 and 34 euro': 0.0,
'between 26 and 37 euro': 0.0,
'between 29 and 37 euro': 0.0,
'between 29 and 44 euro': 0.0,
'between 29 and 58 euro': 0.0,
'between 29 and 73 euro': 0.0,
'between 30 and 58': 0.0,
'between 30 and 58 euro': 0.0,
'between 31 and 50 euro': 0.0,
'between 37 and 110 euro': 0.0,
'between 37 and 44 euro': 0.0,
'between 37 and 58 euro': 0.0,
'between 4 and 22 euro': 0.0,
'between 4 and 58 euro': 0.0,
'between 5 an 30 euro': 0.0,
'between 5 and 10 euro': 0.0,
'between 5 and 11 euro': 0.0,
'between 5 and 15 dollar': 0.0,
'between 5 and 20 euro': 0.0,
'between 5 and 25 euro': 0.0,
'between 6 and 10 euro': 0.0,
'between 6 and 11 euro': 0.0,
'between 6 and 15 euro': 0.0,
'between 6 and 29 euro': 0.0,
'between 7 and 11 euro': 0.0,
'between 7 and 13 euro': 0.0,
'between 7 and 15 euro': 0.0,
'between 7 and 37 euro': 0.0,
'between 8 and 22 euro': 0.0,
'between 9 and 13 dolllar': 0.0,
'between 9 and 15 euro': 0.0,
'between 9 and 58 euro': 0.0,
'bteween 11 and 15 euro': 0.0,
'bteween 15 and 22 euro': 0.0,
'bteween 22 and 37': 0.0,
'bteween 30 and 58 euro': 0.0,
'bteween 51 and 73 euro': 0.0,
'netween 20 and 30 euro': 0.0},
'pricerange': {'**NONE**': 1.0,
'cheap': 0.0,
'dontcare': 0.0,
'expensive': 0.0,
'moderate': 0.0},
'requested': {'addr': 1.0,
'allowedforkids': 0.0,
'area': 0.0,
'food': 0.0,
'goodformeal': 0.0,
'name': 0.0,
'near': 0.0,
'phone': 1,
'postcode': 0.0,
'price': 0.0,
'pricerange': 0.0}},
'features': {'inform_info': [False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True],
'informedVenueSinceNone': ['great eastern restaurant',
'great eastern restaurant'],
'lastActionInformNone': False,
'lastInformedVenue': 'great eastern restaurant',
'offerHappened': False},
'userActs': [('request(name="great eastern restaurant",phone)', 1.0)]}
b2 = {'beliefs': {'allowedforkids': {'**NONE**': 0.014367834316388661,
'0': 0.009175995595522114,
'1': 0.9579333306577846,
'dontcare': 0.01852283943030468},
'area': {'**NONE**': 0.9753165718480455,
'alamo square': 0.0,
'amanico ergina village': 0.0,
'anza vista': 0.0,
'ashbury heights': 0.0,
'balboa terrace': 0.0,
'bayview district': 0.0,
'bayview heights': 0.0,
'bernal heights': 0.0,
'bernal heights north': 0.0,
'bernal heights south': 0.0,
'buena vista park': 0.0,
'castro': 0.0,
'cathedral hill': 0.0,
'cayuga terrace': 0.0,
'central richmond': 0.0,
'central sunset': 0.0,
'central waterfront': 0.0,
'chinatown': 0.0,
'civic center': 0.0,
'clarendon heights': 0.0,
'cole valley': 0.0,
'corona heights': 0.0,
'cow hollow': 0.0,
'crocker amazon': 0.0,
'diamond heights': 0.0,
'doelger city': 0.0,
'dogpatch': 0.0,
'dolores heights': 0.0,
'dontcare': 0.0,
'downtown': 0.0,
'duboce triangle': 0.0,
'embarcadero': 0.0,
'eureka valley': 0.0,
'eureka valley dolores heights': 0.0,
'excelsior': 0.0,
'financial district': 0.0,
'financial district south': 0.0,
'fishermans wharf': 0.0,
'forest hill': 0.0,
'forest hill extension': 0.0,
'forest knolls': 0.0,
'fort mason': 0.0,
'fort winfield scott': 0.0,
'frederick douglass haynes gardens': 0.0,
'friendship village': 0.0,
'glen park': 0.0,
'glenridge': 0.0,
'golden gate heights': 0.0,
'golden gate park': 0.0,
'haight ashbury': 0.0,
'hayes valley': 0.0,
'hunters point': 0.0,
'india basin': 0.0,
'ingleside': 0.0,
'ingleside heights': 0.0,
'ingleside terrace': 0.0,
'inner mission': 0.0,
'inner parkside': 0.0,
'inner richmond': 0.0,
'inner sunset': 0.0,
'inset': 0.0,
'jordan park': 0.0,
'laguna honda': 0.0,
'lake': 0.0,
'lake shore': 0.0,
'lakeside': 0.0,
'laurel heights': 0.0,
'lincoln park': 0.0,
'lincoln park lobos': 0.0,
'little hollywood': 0.0,
'little italy': 0.0,
'little osaka': 0.0,
'little russia': 0.0,
'lone mountain': 0.0,
'lower haight': 0.0,
'lower nob hill': 0.0,
'lower pacific heights': 0.0,
'malcolm x square': 0.0,
'marcus garvey square': 0.0,
'marina district': 0.0,
'martin luther king square': 0.0,
'mastro': 0.0,
'merced heights': 0.0,
'merced manor': 0.0,
'midtown terrace': 0.0,
'miraloma park': 0.0,
'mission bay': 0.0,
'mission district': 0.0,
'mission dolores': 0.0,
'mission terrace': 0.0,
'monterey heights': 0.0,
'mount davidson manor': 0.0,
'nob hill': 0.0,
'noe valley': 0.0,
'noma': 0.0,
'north beach': 0.0,
'north panhandle': 0.0,
'north park': 0.0,
'north waterfront': 0.0,
'oceanview': 0.0,
'opera plaza': 0.0,
'outer mission': 0.0,
'outer parkside': 0.0,
'outer richmond': 0.0,
'outer sunset': 0.0,
'outset': 0.0,
'pacific heights': 0.0,
'panhandle': 0.0,
'park merced': 0.0,
'parkmerced': 0.0,
'parkside': 0.0,
'pine lake park': 0.0,
'portola': 0.0,
'potrero flats': 0.0,
'potrero hill': 0.0,
'presidio': 0.0,
'presidio heights': 0.0,
'richmond district': 0.0,
'russian hill': 0.0,
'saint francis wood': 0.0,
'san francisco airport': 0.0,
'san francisco state university': 0.0,
'sea cliff': 0.0,
'sherwood forest': 0.0,
'showplace square': 0.0,
'silver terrace': 0.0,
'somisspo': 0.0,
'south basin': 0.0,
'south beach': 0.0,
'south of market': 0.0,
'st francis square': 0.0,
'st francis wood': 0.0,
'stonestown': 0.024683428151954484,
'sunnydale': 0.0,
'sunnyside': 0.0,
'sunset district': 0.0,
'telegraph hill': 0.0,
'tenderloin': 0.0,
'thomas paine square': 0.0,
'transmission': 0.0,
'treasure island': 0.0,
'twin peaks': 0.0,
'twin peaks west': 0.0,
'upper market': 0.0,
'van ness': 0.0,
'victoria mews': 0.0,
'visitacion valley': 0.0,
'vista del monte': 0.0,
'west of twin peaks': 0.0,
'west portal': 0.0,
'western addition': 0.0,
'westlake and olympic': 0.0,
'westwood highlands': 0.0,
'westwood park': 0.0,
'yerba buena island': 0.0,
'zion district': 0.0},
'discourseAct': {'ack': 0.0,
'bye': 0.0,
'hello': 0.0,
'none': 0.9999999999999998,
'repeat': 0.0,
'silence': 0.0,
'thankyou': 0.0},
'food': {'**NONE**': 1.0,
'afghan': 0.0,
'arabian': 0.0,
'asian': 0.0,
'basque': 0.0,
'brasseries': 0.0,
'brazilian': 0.0,
'buffets': 0.0,
'burgers': 0.0,
'burmese': 0.0,
'cafes': 0.0,
'cambodian': 0.0,
'cantonese': 0.0,
'chinese': 0.0,
'comfort food': 0.0,
'creperies': 0.0,
'dim sum': 0.0,
'dontcare': 0.0,
'ethiopian': 0.0,
'ethnic food': 0.0,
'french': 0.0,
'gluten free': 0.0,
'himalayan': 0.0,
'indian': 0.0,
'indonesian': 0.0,
'indpak': 0.0,
'italian': 0.0,
'japanese': 0.0,
'korean': 0.0,
'kosher': 0.0,
'latin': 0.0,
'lebanese': 0.0,
'lounges': 0.0,
'malaysian': 0.0,
'mediterranean': 0.0,
'mexican': 0.0,
'middle eastern': 0.0,
'modern european': 0.0,
'moroccan': 0.0,
'new american': 0.0,
'pakistani': 0.0,
'persian': 0.0,
'peruvian': 0.0,
'pizza': 0.0,
'raw food': 0.0,
'russian': 0.0,
'sandwiches': 0.0,
'sea food': 0.0,
'shanghainese': 0.0,
'singaporean': 0.0,
'soul food': 0.0,
'spanish': 0.0,
'steak': 0.0,
'sushi': 0.0,
'taiwanese': 0.0,
'tapas': 0.0,
'thai': 0.0,
'traditionnal american': 0.0,
'turkish': 0.0,
'vegetarian': 0.0,
'vietnamese': 0.0},
'goodformeal': {'**NONE**': 1.0,
'breakfast': 0.0,
'brunch': 0.0,
'dinner': 0.0,
'dontcare': 0.0,
'lunch': 0.0},
'method': {'byalternatives': 0.0,
'byconstraints': 0.7725475751076113,
'byname': 0.0,
'finished': 0.0,
'none': 0.0,
'restart': 0.0},
'name': {'**NONE**': 1.0,
'a 16': 0.0,
'a la turca restaurant': 0.0,
'abacus': 0.0,
'alamo square seafood grill': 0.0,
'albona ristorante istriano': 0.0,
'alborz persian cuisine': 0.0,
'allegro romano': 0.0,
'amarena': 0.0,
'amber india': 0.0,
'ame': 0.0,
'ananda fuara': 0.0,
'anchor oyster bar': 0.0,
'angkor borei restaurant': 0.0,
'aperto restaurant': 0.0,
'ar roi restaurant': 0.0,
'arabian nights restaurant': 0.0,
'assab eritrean restaurant': 0.0,
'atelier crenn': 0.0,
'aux delices restaurant': 0.0,
'aziza': 0.0,
'b star bar': 0.0,
'bar crudo': 0.0,
'beijing restaurant': 0.0,
'bella trattoria': 0.0,
'benu': 0.0,
'betelnut': 0.0,
'bistro central parc': 0.0,
'bix': 0.0,
'borgo': 0.0,
'borobudur restaurant': 0.0,
'bouche': 0.0,
'boulevard': 0.0,
'brothers restaurant': 0.0,
'bund shanghai restaurant': 0.0,
'burma superstar': 0.0,
'butterfly': 0.0,
'cafe claude': 0.0,
'cafe jacqueline': 0.0,
'campton place restaurant': 0.0,
'canteen': 0.0,
'canto do brasil restaurant': 0.0,
'capannina': 0.0,
'capital restaurant': 0.0,
'chai yo thai restaurant': 0.0,
'chaya brasserie': 0.0,
'chenery park': 0.0,
'chez maman': 0.0,
'chez papa bistrot': 0.0,
'chez spencer': 0.0,
'chiaroscuro': 0.0,
'chouchou': 0.0,
'chow': 0.0,
'city view restaurant': 0.0,
'claudine': 0.0,
'coi': 0.0,
'colibri mexican bistro': 0.0,
'coqueta': 0.0,
'crustacean restaurant': 0.0,
'da flora a venetian osteria': 0.0,
'darbar restaurant': 0.0,
'delancey street restaurant': 0.0,
'delfina': 0.0,
'dong baek restaurant': 0.0,
'dosa on fillmore': 0.0,
'dosa on valencia': 0.0,
'eiji': 0.0,
'enjoy vegetarian restaurant': 0.0,
'espetus churrascaria': 0.0,
'fang': 0.0,
'farallon': 0.0,
'fattoush restaurant': 0.0,
'fifth floor': 0.0,
'fino restaurant': 0.0,
'firefly': 0.0,
'firenze by night ristorante': 0.0,
'fleur de lys': 0.0,
'fog harbor fish house': 0.0,
'forbes island': 0.0,
'foreign cinema': 0.0,
'frances': 0.0,
'franchino': 0.0,
'franciscan crab restaurant': 0.0,
'frascati': 0.0,
'fresca': 0.0,
'fringale': 0.0,
'fujiyama ya japanese restaurant': 0.0,
'gajalee': 0.0,
'gamine': 0.0,
'garcon restaurant': 0.0,
'gary danko': 0.0,
'gitane': 0.0,
'golden era restaurant': 0.0,
'gracias madre': 0.0,
'great eastern restaurant': 0.0,
'hakka restaurant': 0.0,
'hakkasan': 0.0,
'han second kwan': 0.0,
'heirloom cafe': 0.0,
'helmand palace': 0.0,
'hi dive': 0.0,
'hillside supper club': 0.0,
'hillstone': 0.0,
'hong kong clay pot restaurant': 0.0,
'house of nanking': 0.0,
'house of prime rib': 0.0,
'hunan homes restaurant': 0.0,
'incanto': 0.0,
'isa': 0.0,
'jannah': 0.0,
'jasmine garden': 0.0,
'jitlada thai cuisine': 0.0,
'kappa japanese restaurant': 0.0,
'kim thanh restaurant': 0.0,
'kirin chinese restaurant': 0.0,
'kiss seafood': 0.0,
'kokkari estiatorio': 0.0,
'la briciola': 0.0,
'la ciccia': 0.0,
'la folie': 0.0,
'la mediterranee': 0.0,
'la traviata': 0.0,
'lahore karahi': 0.0,
'lavash': 0.0,
'le charm': 0.0,
'le colonial': 0.0,
'le soleil': 0.0,
'lime tree southeast asian kitchen': 0.0,
'little delhi': 0.0,
'little nepal': 0.0,
'luce': 0.0,
'lucky creation restaurant': 0.0,
'luella': 0.0,
'lupa': 0.0,
'm y china': 0.0,
'maki restaurant': 0.0,
'mangia tutti ristorante': 0.0,
'manna': 0.0,
'marlowe': 0.0,
'marnee thai': 0.0,
'maverick': 0.0,
'mela tandoori kitchen': 0.0,
'mescolanza': 0.0,
'mezes': 0.0,
'michael mina restaurant': 0.0,
'millennium': 0.0,
'minako organic japanese restaurant': 0.0,
'minami restaurant': 0.0,
'mission chinese food': 0.0,
'mochica': 0.0,
'modern thai': 0.0,
'mona lisa restaurant': 0.0,
'mozzeria': 0.0,
'muguboka restaurant': 0.0,
'my tofu house': 0.0,
'nicaragua restaurant': 0.0,
'nob hill cafe': 0.0,
'nopa': 0.0,
'old jerusalem restaurant': 0.0,
'old skool cafe': 0.0,
'one market restaurant': 0.0,
'orexi': 0.0,
'original us restaurant': 0.0,
'osha thai': 0.0,
'oyaji restaurant': 0.0,
'ozumo': 0.0,
'pad thai restaurant': 0.0,
'panta rei restaurant': 0.0,
'park tavern': 0.0,
'pera': 0.0,
'piperade': 0.0,
'ploy 2': 0.0,
'poc chuc': 0.0,
'poesia': 0.0,
'prospect': 0.0,
'quince': 0.0,
'radius san francisco': 0.0,
'range': 0.0,
'red door cafe': 0.0,
'restaurant ducroix': 0.0,
'ristorante bacco': 0.0,
'ristorante ideale': 0.0,
'ristorante milano': 0.0,
'ristorante parma': 0.0,
'rn74': 0.0,
'rue lepic': 0.0,
'saha': 0.0,
'sai jai thai restaurant': 0.0,
'salt house': 0.0,
'san tung chinese restaurant': 0.0,
'san wang restaurant': 0.0,
'sanjalisco': 0.0,
'sanraku': 0.0,
'seasons': 0.0,
'seoul garden': 0.0,
'seven hills': 0.0,
'shangri la vegetarian restaurant': 0.0,
'singapore malaysian restaurant': 0.0,
'skool': 0.0,
'so': 0.0,
'sotto mare': 0.0,
'source': 0.0,
'specchio ristorante': 0.0,
'spruce': 0.0,
'straits restaurant': 0.0,
'stroganoff restaurant': 0.0,
'sunflower potrero hill': 0.0,
'sushi bistro': 0.0,
'taiwan restaurant': 0.0,
'tanuki restaurant': 0.0,
'tataki': 0.0,
'tekka japanese restaurant': 0.0,
'thai cottage restaurant': 0.0,
'thai house express': 0.0,
'thai idea vegetarian': 0.0,
'thai time restaurant': 0.0,
'thanh long': 0.0,
'the big 4 restaurant': 0.0,
'the blue plate': 0.0,
'the house': 0.0,
'the richmond': 0.0,
'the slanted door': 0.0,
'the stinking rose': 0.0,
'thep phanom thai restaurant': 0.0,
'tommys joynt': 0.0,
'toraya japanese restaurant': 0.0,
'town hall': 0.0,
'trattoria contadina': 0.0,
'tu lan': 0.0,
'tuba restaurant': 0.0,
'u lee restaurant': 0.0,
'udupi palace': 0.0,
'venticello ristorante': 0.0,
'vicoletto': 0.0,
'yank sing': 0.0,
'yummy yummy': 0.0,
'z and y restaurant': 0.0,
'zadin': 0.0,
'zare at fly trap': 0.0,
'zarzuela': 0.0,
'zen yai thai restaurant': 0.0,
'zuni cafe': 0.0,
'zushi puzzle': 0.0},
'near': {'**NONE**': 0.13300733496332517,
'bayview hunters point': 0.0,
'dontcare': 0.15859820700896493,
'haight': 0.0,
'japantown': 0.038712306438467806,
'marina cow hollow': 0.0,
'mission': 0.0,
'nopa': 0.669682151589242,
'north beach telegraph hill': 0.0,
'soma': 0.0,
'union square': 0.0},
'price': {'**NONE**': 1.0,
'10 dollar': 0.0,
'10 euro': 0.0,
'11 euro': 0.0,
'15 euro': 0.0,
'18 euro': 0.0,
'20 euro': 0.0,
'22 euro': 0.0,
'25 euro': 0.0,
'26 euro': 0.0,
'29 euro': 0.0,
'37 euro': 0.0,
'6': 0.0,
'7': 0.0,
'9': 0.0,
'between 0 and 15 euro': 0.0,
'between 10 and 13 euro': 0.0,
'between 10 and 15 euro': 0.0,
'between 10 and 18 euro': 0.0,
'between 10 and 20 euro': 0.0,
'between 10 and 23 euro': 0.0,
'between 10 and 30 euro': 0.0,
'between 11 and 15 euro': 0.0,
'between 11 and 18 euro': 0.0,
'between 11 and 22 euro': 0.0,
'between 11 and 25 euro': 0.0,
'between 11 and 29 euro': 0.0,
'between 11 and 35 euro': 0.0,
'between 13 and 15 euro': 0.0,
'between 13 and 18 euro': 0.0,
'between 13 and 24 euro': 0.0,
'between 15 and 18 euro': 0.0,
'between 15 and 22 euro': 0.0,
'between 15 and 26 euro': 0.0,
'between 15 and 29 euro': 0.0,
'between 15 and 33 euro': 0.0,
'between 15 and 44 euro': 0.0,
'between 15 and 58 euro': 0.0,
'between 18 and 26 euro': 0.0,
'between 18 and 29 euro': 0.0,
'between 18 and 44 euro': 0.0,
'between 18 and 55 euro': 0.0,
'between 18 and 58 euro': 0.0,
'between 18 and 73 euro': 0.0,
'between 18 and 78 euro': 0.0,
'between 2 and 15 euro': 0.0,
'between 20 and 30 euro': 0.0,
'between 21 and 23 euro': 0.0,
'between 22 and 29 euro': 0.0,
'between 22 and 30 dollar': 0.0,
'between 22 and 37 euro': 0.0,
'between 22 and 58 euro': 0.0,
'between 22 and 73 euro': 0.0,
'between 23 and 29': 0.0,
'between 23 and 29 euro': 0.0,
'between 23 and 37 euro': 0.0,
'between 23 and 58': 0.0,
'between 23 and 58 euro': 0.0,
'between 26 and 33 euro': 0.0,
'between 26 and 34 euro': 0.0,
'between 26 and 37 euro': 0.0,
'between 29 and 37 euro': 0.0,
'between 29 and 44 euro': 0.0,
'between 29 and 58 euro': 0.0,
'between 29 and 73 euro': 0.0,
'between 30 and 58': 0.0,
'between 30 and 58 euro': 0.0,
'between 31 and 50 euro': 0.0,
'between 37 and 110 euro': 0.0,
'between 37 and 44 euro': 0.0,
'between 37 and 58 euro': 0.0,
'between 4 and 22 euro': 0.0,
'between 4 and 58 euro': 0.0,
'between 5 an 30 euro': 0.0,
'between 5 and 10 euro': 0.0,
'between 5 and 11 euro': 0.0,
'between 5 and 15 dollar': 0.0,
'between 5 and 20 euro': 0.0,
'between 5 and 25 euro': 0.0,
'between 6 and 10 euro': 0.0,
'between 6 and 11 euro': 0.0,
'between 6 and 15 euro': 0.0,
'between 6 and 29 euro': 0.0,
'between 7 and 11 euro': 0.0,
'between 7 and 13 euro': 0.0,
'between 7 and 15 euro': 0.0,
'between 7 and 37 euro': 0.0,
'between 8 and 22 euro': 0.0,
'between 9 and 13 dolllar': 0.0,
'between 9 and 15 euro': 0.0,
'between 9 and 58 euro': 0.0,
'bteween 11 and 15 euro': 0.0,
'bteween 15 and 22 euro': 0.0,
'bteween 22 and 37': 0.0,
'bteween 30 and 58 euro': 0.0,
'bteween 51 and 73 euro': 0.0,
'netween 20 and 30 euro': 0.0},
'pricerange': {'**NONE**': 0.22571148184494605,
'cheap': 0.0,
'dontcare': 0.774288518155054,
'expensive': 0.0,
'moderate': 0.0},
'requested': {'addr': 0.0,
'allowedforkids': 0.0,
'area': 0.0,
'food': 0.0,
'goodformeal': 0.0,
'name': 0.0,
'near': 0.0,
'phone': 0.0,
'postcode': 0.0,
'price': 0.0,
'pricerange': 0.0}},
'features': {'inform_info': [False,
False,
False,
True,
True,
False,
False,
False,
True,
True,
False,
True,
False,
False,
False,
False,
True,
False,
False,
False,
False,
True,
False,
False,
False],
'informedVenueSinceNone': [],
'lastActionInformNone': False,
'lastInformedVenue': '',
'offerHappened': False},
'userActs': [('inform(allowedforkids="1")', 0.90842356395668944),
('inform(allowedforkids="dontcare")', 0.0091759955955221153),
('inform(allowedforkids="0")', 0.0091759955955221153),
('inform(postcode)', 0.025509267755551478),
('inform(area="stonestown")', 0.024683428151954491),
('null()', 0.023031748944760511)]}
b3 = {'beliefs': {'area': {'**NONE**': 0.12910550615265692,
'centre': 0.8338099777773861,
'dontcare': 0.0,
'east': 0.03708451606995696,
'north': 0.0,
'south': 0.0,
'west': 0.0},
'discourseAct': {'ack': 0.0,
'bye': 0.0,
'hello': 0.0,
'none': 1.0,
'repeat': 0.0,
'silence': 0.0,
'thankyou': 0.0},
'food': {'**NONE**': 0.020895546925810415,
'afghan': 0.0,
'african': 0.0,
'afternoon tea': 0.0,
'asian oriental': 0.0,
'australasian': 0.0,
'australian': 0.0,
'austrian': 0.0,
'barbeque': 0.0,
'basque': 0.0,
'belgian': 0.0,
'bistro': 0.0,
'brazilian': 0.0,
'british': 0.0,
'canapes': 0.0,
'cantonese': 0.0,
'caribbean': 0.0,
'catalan': 0.0,
'chinese': 0.0,
'christmas': 0.0,
'corsica': 0.0,
'creative': 0.0,
'crossover': 0.0,
'cuban': 0.0,
'danish': 0.0,
'dontcare': 0.0,
'eastern european': 0.0,
'english': 0.0,
'eritrean': 0.0,
'european': 0.0,
'french': 0.0,
'fusion': 0.0,
'gastropub': 0.0,
'german': 0.0,
'greek': 0.0,
'halal': 0.0,
'hungarian': 0.0,
'indian': 0.0,
'indonesian': 0.0,
'international': 0.0,
'irish': 0.0,
'italian': 0.0,
'jamaican': 0.0,
'japanese': 0.0,
'korean': 0.0,
'kosher': 0.0,
'latin american': 0.0,
'lebanese': 0.0,
'light bites': 0.0,
'malaysian': 0.0,
'mediterranean': 0.9791044530741896,
'mexican': 0.0,
'middle eastern': 0.0,
'modern american': 0.0,
'modern eclectic': 0.0,
'modern european': 0.0,
'modern global': 0.0,
'molecular gastronomy': 0.0,
'moroccan': 0.0,
'new zealand': 0.0,
'north african': 0.0,
'north american': 0.0,
'north indian': 0.0,
'northern european': 0.0,
'panasian': 0.0,
'persian': 0.0,
'polish': 0.0,
'polynesian': 0.0,
'portuguese': 0.0,
'romanian': 0.0,
'russian': 0.0,
'scandinavian': 0.0,
'scottish': 0.0,
'seafood': 0.0,
'singaporean': 0.0,
'south african': 0.0,
'south indian': 0.0,
'spanish': 0.0,
'sri lankan': 0.0,
'steakhouse': 0.0,
'swedish': 0.0,
'swiss': 0.0,
'thai': 0.0,
'the americas': 0.0,
'traditional': 0.0,
'turkish': 0.0,
'tuscan': 0.0,
'unusual': 0.0,
'vegetarian': 0.0,
'venetian': 0.0,
'vietnamese': 0.0,
'welsh': 0.0,
'world': 0.0},
'method': {'byalternatives': 0.0,
'byconstraints': 0.6359877465366015,
'byname': 0.0,
'finished': 0.0,
'none': 0.0,
'restart': 0.0},
'name': {'**NONE**': 1.0,
'ali baba': 0.0,
'anatolia': 0.0,
'ask': 0.0,
'backstreet bistro': 0.0,
'bangkok city': 0.0,
'bedouin': 0.0,
'bloomsbury restaurant': 0.0,
'caffe uno': 0.0,
'cambridge lodge restaurant': 0.0,
'charlie chan': 0.0,
'chiquito restaurant bar': 0.0,
'city stop restaurant': 0.0,
'clowns cafe': 0.0,
'cocum': 0.0,
'cote': 0.0,
'cotto': 0.0,
'curry garden': 0.0,
'curry king': 0.0,
'curry prince': 0.0,
'curry queen': 0.0,
'da vince pizzeria': 0.0,
'da vinci pizzeria': 0.0,
'darrys cookhouse and wine shop': 0.0,
'de luca cucina and bar': 0.0,
'dojo noodle bar': 0.0,
'don pasquale pizzeria': 0.0,
'efes restaurant': 0.0,
'eraina': 0.0,
'fitzbillies restaurant': 0.0,
'frankie and bennys': 0.0,
'galleria': 0.0,
'golden house': 0.0,
'golden wok': 0.0,
'gourmet burger kitchen': 0.0,
'graffiti': 0.0,
'grafton hotel restaurant': 0.0,
'hakka': 0.0,
'hk fusion': 0.0,
'hotel du vin and bistro': 0.0,
'india house': 0.0,
'j restaurant': 0.0,
'jinling noodle bar': 0.0,
'kohinoor': 0.0,
'kymmoy': 0.0,
'la margherita': 0.0,
'la mimosa': 0.0,
'la raza': 0.0,
'la tasca': 0.0,
'lan hong house': 0.0,
'little seoul': 0.0,
'loch fyne': 0.0,
'mahal of cambridge': 0.0,
'maharajah tandoori restaurant': 0.0,
'meghna': 0.0,
'meze bar restaurant': 0.0,
'michaelhouse cafe': 0.0,
'midsummer house restaurant': 0.0,
'nandos': 0.0,
'nandos city centre': 0.0,
'panahar': 0.0,
'peking restaurant': 0.0,
'pipasha restaurant': 0.0,
'pizza express': 0.0,
'pizza express fen ditton': 0.0,
'pizza hut': 0.0,
'pizza hut cherry hinton': 0.0,
'pizza hut city centre': 0.0,
'pizza hut fen ditton': 0.0,
'prezzo': 0.0,
'rajmahal': 0.0,
'restaurant alimentum': 0.0,
'restaurant one seven': 0.0,
'restaurant two two': 0.0,
'rice boat': 0.0,
'rice house': 0.0,
'riverside brasserie': 0.0,
'royal spice': 0.0,
'royal standard': 0.0,
'saffron brasserie': 0.0,
'saigon city': 0.0,
'saint johns chop house': 0.0,
'sala thong': 0.0,
'sesame restaurant and bar': 0.0,
'shanghai family restaurant': 0.0,
'shiraz restaurant': 0.0,
'sitar tandoori': 0.0,
'stazione restaurant and coffee bar': 0.0,
'taj tandoori': 0.0,
'tandoori palace': 0.0,
'tang chinese': 0.0,
'thanh binh': 0.0,
'the cambridge chop house': 0.0,
'the copper kettle': 0.0,
'the cow pizza kitchen and bar': 0.0,
'the gandhi': 0.0,
'the gardenia': 0.0,
'the golden curry': 0.0,
'the good luck chinese food takeaway': 0.0,
'the hotpot': 0.0,
'the lucky star': 0.0,
'the missing sock': 0.0,
'the nirala': 0.0,
'the oak bistro': 0.0,
'the river bar steakhouse and grill': 0.0,
'the slug and lettuce': 0.0,
'the varsity restaurant': 0.0,
'travellers rest': 0.0,
'ugly duckling': 0.0,
'venue': 0.0,
'wagamama': 0.0,
'yippee noodle bar': 0.0,
'yu garden': 0.0,
'zizzi cambridge': 0.0},
'pricerange': {'**NONE**': 0.1340777132648503,
'cheap': 0.0,
'dontcare': 0.8659222867351497,
'expensive': 0.0,
'moderate': 0.0},
'requested': {'addr': 0.0,
'area': 0.0,
'description': 0.0,
'food': 0.0,
'name': 0.0,
'phone': 0.0,
'postcode': 0.0,
'pricerange': 0.0,
'signature': 0.0}},
'features': {'inform_info': [False,
False,
True,
False,
True,
False,
False,
True,
False,
False,
False,
False,
True,
False,
False,
False,
False,
True,
False,
False,
False,
False,
True,
False,
False],
'informedVenueSinceNone': [],
'lastActionInformNone': False,
'lastInformedVenue': '',
'offerHappened': False},
'userActs': [('inform(food="mediterranean")', 0.84415346579983519),
('inform(area="east")', 0.037084516069956962),
('null()', 0.048530354363153554),
('reqmore()', 0.04541708634740408),
('confirm(phone)', 0.024814577419650211)]}
return b1, b2, b3
def main():
    """
    Unit test for padded_state.

    Initialises Settings and the global ontology, builds three test belief
    states, wraps them in padded_state objects and prints checks on the
    padded belief-state vector lengths.

    :return: None
    """
    Settings.init('config/Tut-gp-Multidomain.cfg', 12345)
    Ontology.init_global_ontology()
    b1, b2, b3 = get_test_beliefs()
    state1 = padded_state(b1, domainString='SFRestaurants')
    state2 = padded_state(b2, domainString='SFRestaurants')
    state3 = padded_state(b3, domainString='CamRestaurants')
    print(state1.get_beliefStateVec('area')[:state1.max_v])
    # Both differences should print 0 when the padded vectors have the
    # expected maximum length.
    print(len(state2.get_beliefStateVec('near')) - state2.max_v)
    print(len(state3.get_beliefStateVec('pricerange')) - state3.max_v)
    # Retrieved for interactive inspection (e.g. under a debugger);
    # the dead breakpoint anchor `a = 1` and commented-out DIP_state
    # experiments were removed.
    s2 = state2.get_beliefStateVec('food')
    s3 = state3.get_beliefStateVec('food')
# Entry point: run the unit test when this module is executed as a script.
if __name__ == '__main__':
    main()
| 29.386555
| 215
| 0.580733
| 8,968
| 59,449
| 3.7907
| 0.130687
| 0.083836
| 0.030004
| 0.052772
| 0.807942
| 0.784292
| 0.775438
| 0.768878
| 0.764612
| 0.762083
| 0
| 0.096989
| 0.243133
| 59,449
| 2,022
| 216
| 29.401088
| 0.65854
| 0.057949
| 0
| 0.814041
| 0
| 0
| 0.384887
| 0.004221
| 0
| 0
| 0
| 0
| 0.000536
| 1
| 0.01179
| false
| 0
| 0.003751
| 0.002144
| 0.025723
| 0.001608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
766eee39b7b00ff848158f1d742e9b1accbd31d8
| 250
|
py
|
Python
|
atividades01/Exercicio11/modulo.py
|
fernando44/Estudando-Python
|
ead53df1d1753f630575ca4b1be70d2b53657d6a
|
[
"MIT"
] | null | null | null |
atividades01/Exercicio11/modulo.py
|
fernando44/Estudando-Python
|
ead53df1d1753f630575ca4b1be70d2b53657d6a
|
[
"MIT"
] | null | null | null |
atividades01/Exercicio11/modulo.py
|
fernando44/Estudando-Python
|
ead53df1d1753f630575ca4b1be70d2b53657d6a
|
[
"MIT"
] | null | null | null |
def soma(var1, var2):
    """Return the sum of var1 and var2."""
    return var1 + var2
def subtracao(var1, var2):
    """Return var1 minus var2."""
    return var1 - var2
def multiplicacao(var1, var2):
    """Return the product of var1 and var2."""
    return var1 * var2
def divisao(var1, var2):
    """Return var1 divided by var2 (true division; raises ZeroDivisionError if var2 == 0)."""
    return var1 / var2
| 15.625
| 30
| 0.628
| 36
| 250
| 4.361111
| 0.25
| 0.407643
| 0.280255
| 0.382166
| 0.770701
| 0.770701
| 0.770701
| 0.592357
| 0
| 0
| 0
| 0.088398
| 0.276
| 250
| 15
| 31
| 16.666667
| 0.779006
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
4f21fd26ea19653fccfcc687ef5a731e1e2fb8c0
| 122
|
py
|
Python
|
tests/test_sanity.py
|
ffreemt/koyeb-nb2
|
b1bf1c53e3a36e2f9153bf52c5a49974a4c5bb1e
|
[
"MIT"
] | 4
|
2021-07-22T06:25:20.000Z
|
2022-01-25T07:45:29.000Z
|
tests/test_sanity.py
|
ffreemt/koyeb-nb2
|
b1bf1c53e3a36e2f9153bf52c5a49974a4c5bb1e
|
[
"MIT"
] | null | null | null |
tests/test_sanity.py
|
ffreemt/koyeb-nb2
|
b1bf1c53e3a36e2f9153bf52c5a49974a4c5bb1e
|
[
"MIT"
] | 1
|
2022-02-21T04:57:13.000Z
|
2022-02-21T04:57:13.000Z
|
"""Test sanity."""
from koyeb_nb2 import koyeb_nb2
def test_sanity():
    """Smoke test: koyeb_nb2() completes and returns a falsy value."""
    result = koyeb_nb2()
    assert not result
| 15.25
| 31
| 0.655738
| 17
| 122
| 4.470588
| 0.529412
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.188525
| 122
| 7
| 32
| 17.428571
| 0.737374
| 0.204918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4f823082ea3ea6dd2a767ce3eca0c7289f9b197b
| 8,843
|
py
|
Python
|
rouge_papier_v2/rouge_papier_v2/generate.py
|
BambooPalace/text-summarization
|
17ac68598563492b5e8959493b2bf1b137f78a5a
|
[
"MIT"
] | 54
|
2019-09-20T12:31:10.000Z
|
2022-03-19T12:21:32.000Z
|
rouge_papier_v2/rouge_papier_v2/generate.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 9
|
2019-11-25T06:17:11.000Z
|
2022-03-23T04:08:53.000Z
|
rouge_papier_v2/rouge_papier_v2/generate.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 12
|
2019-12-08T10:06:05.000Z
|
2022-03-06T08:10:53.000Z
|
from .util import TempFileManager, make_simple_config_text
from . import wrapper
import numpy as np
def compute_extract(sentences, summaries, mode="independent", ngram=1,
                    length=100, length_unit="word", remove_stopwords=False):
    """Compute an oracle extractive-summary labeling of ``sentences``.

    Args:
        sentences: list of sentence strings to select from.
        summaries: list of reference summary strings.
        mode: greedy selection strategy, "independent" or "sequential".
        ngram: ROUGE order forwarded to the underlying extractor.
        length: ROUGE length limit.
        length_unit: unit of ``length`` (e.g. "word").
        remove_stopwords: forwarded to the ROUGE wrapper.

    Returns:
        A ``(labels, pairwise_ranks)`` tuple; ``pairwise_ranks`` is ``None``
        in "sequential" mode so both modes share the same return shape.

    Raises:
        ValueError: if ``mode`` is neither "independent" nor "sequential".
    """
    if mode == "independent":
        return compute_greedy_independent_extract(
            sentences, summaries, ngram, length=length,
            length_unit=length_unit, remove_stopwords=remove_stopwords)
    elif mode == "sequential":
        # Sequential extraction returns only labels; pad with None to match
        # the (labels, pairwise_ranks) shape of the independent mode.
        return compute_greedy_sequential_extract(
            sentences, summaries, ngram, length=length,
            length_unit=length_unit, remove_stopwords=remove_stopwords), None
    else:
        # ValueError is the conventional type for a bad argument value and is
        # still caught by any caller that caught the original broad Exception.
        raise ValueError("mode must be 'independent' or 'sequential'")
def compute_pairwise_ranks(sentences, summaries, mode="independent", ngram=1,
                           length=100, length_unit="word",
                           remove_stopwords=False):
    """Compute pairwise sentence-rank data for oracle extraction.

    Args:
        sentences: list of sentence strings to select from.
        summaries: list of reference summary strings.
        mode: greedy selection strategy, "independent" or "sequential".
        ngram: ROUGE order forwarded to the underlying extractor.
        length: ROUGE length limit.
        length_unit: unit of ``length`` (e.g. "word").
        remove_stopwords: forwarded to the ROUGE wrapper.

    Returns:
        Whatever the selected underlying extractor returns.

    Raises:
        ValueError: if ``mode`` is neither "independent" nor "sequential".
    """
    if mode == "independent":
        # NOTE(review): compute_greedy_independent_pairwise_ranks is not
        # defined in this file chunk — confirm it exists elsewhere in the
        # module, otherwise this branch raises NameError at call time.
        return compute_greedy_independent_pairwise_ranks(
            sentences, summaries, ngram, length=length,
            length_unit=length_unit, remove_stopwords=remove_stopwords)
    elif mode == "sequential":
        return compute_greedy_sequential_pairwise_ranks(
            sentences, summaries, ngram, length=length,
            length_unit=length_unit, remove_stopwords=remove_stopwords)
    else:
        # ValueError is the conventional type for a bad argument value and is
        # still caught by any caller that caught the original broad Exception.
        raise ValueError("mode must be 'independent' or 'sequential'")
def compute_greedy_independent_extract(sentences, summaries, order,
                                       length=100, length_unit="word",
                                       remove_stopwords=False):
    # Greedy "independent" oracle extraction:
    #   1. Score every sentence on its own against the reference summaries.
    #   2. Rank sentences by that score.
    #   3. Grow a candidate summary in rank order and keep the prefix length
    #      that maximizes ROUGE.
    # Returns (labels, pairwise_ranks): labels[i] is the 1-based selection
    # rank of sentence i (0 if unselected); pairwise_ranks lists
    # (better, worse) index pairs for the top-5 ranked sentences.
    # ``order`` is an int ROUGE order or "L" for ROUGE-L.
    with TempFileManager() as manager:
        input_paths = manager.create_temp_files(sentences)
        summary_paths = manager.create_temp_files(summaries)
        config_text = make_simple_config_text([[input_path, summary_paths]
                                               for input_path in input_paths])
        config_path = manager.create_temp_file(config_text)
        if order == "L":
            # ROUGE-L (longest common subsequence) scoring.
            df = wrapper.compute_rouge(
                config_path, max_ngram=0, lcs=True, length=length,
                length_unit=length_unit, remove_stopwords=remove_stopwords)
        else:
            order = int(order)
            df = wrapper.compute_rouge(
                config_path, max_ngram=order, lcs=False, length=length,
                length_unit=length_unit, remove_stopwords=remove_stopwords)
        # Drop the last row with [:-1] — presumably an aggregate row appended
        # by the wrapper; TODO confirm against wrapper.compute_rouge.
        scores = df["rouge-{}".format(order)].values.ravel()[:-1]
        # Sentence indices ordered best-first.
        ranked_indices = [i for i in np.argsort(scores)[::-1]]
        candidate_extracts = []
        agg_texts = []
        for i in ranked_indices:
            # Candidate k = the top-(k+1) ranked sentences joined together.
            agg_texts.append(sentences[i])
            candidate_extracts.append("\n".join(agg_texts))
        input_paths = manager.create_temp_files(candidate_extracts)
        config_text = make_simple_config_text([[input_path, summary_paths]
                                               for input_path in input_paths])
        config_path = manager.create_temp_file(config_text)
        if order == "L":
            df = wrapper.compute_rouge(
                config_path, max_ngram=0, lcs=True, length=length,
                length_unit=length_unit, remove_stopwords=remove_stopwords)
        else:
            df = wrapper.compute_rouge(
                config_path, max_ngram=order, lcs=False, length=length,
                length_unit=length_unit, remove_stopwords=remove_stopwords)
        # Best prefix length = index of the candidate with maximal ROUGE.
        opt_sent_length = np.argmax(
            df["rouge-{}".format(order)].values.ravel()[:-1])
        extract_indices = ranked_indices[:opt_sent_length + 1]
        labels = [0] * len(sentences)
        for rank, index in enumerate(extract_indices, 1):
            # 1-based selection rank; 0 means not selected.
            labels[index] = rank
        pairwise_ranks = []
        for i, top_index in enumerate(ranked_indices[:5]):
            # Each of the top-5 sentences outranks every sentence below it.
            for bottom_index in ranked_indices[i+1:]:
                pairwise_ranks.append((int(top_index), int(bottom_index)))
        return labels, pairwise_ranks
def compute_greedy_sequential_extract(sentences, summaries, order,
                                      length=100, length_unit="word",
                                      remove_stopwords=False):
    # Greedy "sequential" oracle extraction: repeatedly add the sentence that
    # most improves the running summary's ROUGE score; stop as soon as no
    # candidate improves on the current best score.
    # Returns labels: labels[i] is the 1-based selection order of sentence i
    # (0 if unselected). ``order`` is an int ROUGE order or "L" for ROUGE-L.
    with TempFileManager() as manager:
        summary_paths = manager.create_temp_files(summaries)
        # Remaining (original_index, sentence) candidates.
        options = [(i, sent) for i, sent in enumerate(sentences)]
        current_indices = []
        current_summary_sents = []
        current_score = 0
        while len(options) > 0:
            # Each candidate = current summary extended with one more option.
            candidates = []
            for idx, sent in options:
                candidates.append("\n".join(current_summary_sents + [sent]))
            candidate_paths = manager.create_temp_files(candidates)
            config_text = make_simple_config_text(
                [[cand_path, summary_paths] for cand_path in candidate_paths])
            config_path = manager.create_temp_file(config_text)
            if order == "L":
                # ROUGE-L (longest common subsequence) scoring.
                df = wrapper.compute_rouge(
                    config_path, max_ngram=0, lcs=True, length=length,
                    length_unit=length_unit, remove_stopwords=remove_stopwords)
            else:
                order = int(order)
                df = wrapper.compute_rouge(
                    config_path, max_ngram=order, lcs=False, length=length,
                    length_unit=length_unit, remove_stopwords=remove_stopwords)
            # Drop the last row with [:-1] — presumably an aggregate row;
            # TODO confirm against wrapper.compute_rouge.
            scores = df["rouge-{}".format(order)].values.ravel()[:-1]
            ranked_indices = [i for i in np.argsort(scores)[::-1]]
            if scores[ranked_indices[0]] > current_score:
                # Best candidate improves the summary: commit it and remove
                # it from the remaining options.
                current_score = scores[ranked_indices[0]]
                current_indices.append(options[ranked_indices[0]][0])
                current_summary_sents.append(options[ranked_indices[0]][1])
                options.pop(ranked_indices[0])
            else:
                break
        labels = [0] * len(sentences)
        for rank, index in enumerate(current_indices, 1):
            # 1-based selection order; 0 means not selected.
            labels[index] = rank
        return labels
def compute_greedy_sequential_pairwise_ranks(sentences, summaries, order,
                                             length=100, length_unit="word",
                                             remove_stopwords=False):
    """Greedy sequential extraction that also accumulates pairwise rank data.

    Repeatedly adds the sentence that most improves the running summary's
    ROUGE score (stopping when no candidate improves it) and records, each
    round, orderings between the top-ranked candidates and those below them.

    Args:
        sentences: list of sentence strings to select from.
        summaries: list of reference summary strings.
        order: int ROUGE order, or "L" for ROUGE-L.
        length: ROUGE length limit.
        length_unit: unit of ``length`` (e.g. "word").
        remove_stopwords: forwarded to the ROUGE wrapper.

    Returns:
        labels: one int per sentence — its 1-based selection order, 0 if
        unselected.

    Note:
        The original version dumped debug output to stdout and called
        ``exit()`` inside the loop, terminating the whole process and making
        the return statement unreachable. Those debug statements were
        removed. ``ranks`` is still accumulated but not returned; expose it
        here if callers need the pairwise data.
    """
    from collections import defaultdict
    with TempFileManager() as manager:
        summary_paths = manager.create_temp_files(summaries)
        # Remaining (original_index, sentence) candidates.
        options = [(i, sent) for i, sent in enumerate(sentences)]
        current_indices = []
        current_summary_sents = []
        current_score = 0
        ranks = defaultdict(list)
        while len(options) > 0:
            # Each candidate = current summary extended with one more option.
            candidates = []
            for idx, sent in options:
                candidates.append("\n".join(current_summary_sents + [sent]))
            candidate_paths = manager.create_temp_files(candidates)
            config_text = make_simple_config_text(
                [[cand_path, summary_paths] for cand_path in candidate_paths])
            config_path = manager.create_temp_file(config_text)
            if order == "L":
                # ROUGE-L (longest common subsequence) scoring.
                df = wrapper.compute_rouge(
                    config_path, max_ngram=0, lcs=True, length=length,
                    length_unit=length_unit, remove_stopwords=remove_stopwords)
            else:
                order = int(order)
                df = wrapper.compute_rouge(
                    config_path, max_ngram=order, lcs=False, length=length,
                    length_unit=length_unit, remove_stopwords=remove_stopwords)
            # Drop the trailing aggregate row with [:-1].
            scores = df["rouge-{}".format(order)].values.ravel()[:-1]
            ranked_indices = [i for i in np.argsort(scores)[::-1]]
            ranked_options = [options[i][0] for i in ranked_indices]
            if scores[ranked_indices[0]] > current_score:
                current_score = scores[ranked_indices[0]]
                current_indices.append(options[ranked_indices[0]][0])
                current_summary_sents.append(options[ranked_indices[0]][1])
                options.pop(ranked_indices[0])
                # Record that each of this round's top candidates outranks
                # every candidate ranked below it. Cap at the number of
                # remaining options to avoid an IndexError when fewer than
                # five candidates are left.
                for i in range(min(5, len(ranked_options))):
                    for rank in [(ranked_options[i], j)
                                 for j in ranked_options[i+1:]]:
                        ranks[tuple(sorted(rank))].append(rank)
            else:
                break
        labels = [0] * len(sentences)
        for rank, index in enumerate(current_indices, 1):
            # 1-based selection order; 0 means not selected.
            labels[index] = rank
        return labels
| 39.833333
| 79
| 0.579102
| 933
| 8,843
| 5.22508
| 0.118971
| 0.059487
| 0.044308
| 0.054154
| 0.858051
| 0.845538
| 0.824205
| 0.801846
| 0.801846
| 0.793231
| 0
| 0.009814
| 0.331675
| 8,843
| 221
| 80
| 40.013575
| 0.815059
| 0
| 0
| 0.742515
| 0
| 0
| 0.023748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0
| 0.023952
| 0
| 0.095808
| 0.023952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f907bf4a329fdb73f6668bfebf7117ea930d72e
| 7,443
|
py
|
Python
|
tests/handling/test_cause_handling.py
|
thevennamaneni/kopf
|
020f8bc91268225d43575e1bb69470ef10ae6113
|
[
"MIT"
] | null | null | null |
tests/handling/test_cause_handling.py
|
thevennamaneni/kopf
|
020f8bc91268225d43575e1bb69470ef10ae6113
|
[
"MIT"
] | null | null | null |
tests/handling/test_cause_handling.py
|
thevennamaneni/kopf
|
020f8bc91268225d43575e1bb69470ef10ae6113
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import kopf
from kopf.reactor.causation import CREATE, UPDATE, DELETE, NEW, GONE, FREE, NOOP
from kopf.reactor.handling import custom_object_handler
from kopf.structs.finalizers import FINALIZER
from kopf.structs.lastseen import LAST_SEEN_ANNOTATION
async def test_new(registry, handlers, resource, cause_mock,
                   caplog, assert_logs, k8s_mocked):
    # NEW cause: object seen for the first time. Expected: no cause handler
    # runs, no k8s event is posted, and exactly one patch adds the finalizer.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = NEW
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert k8s_mocked.asyncio_sleep.call_count == 0
    assert k8s_mocked.post_event.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    # The single patch must add the framework's finalizer to the object.
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in patch
    assert 'finalizers' in patch['metadata']
    assert FINALIZER in patch['metadata']['finalizers']
    assert_logs([
        "First appearance",
        "Adding the finalizer",
        "Patching with",
    ])
async def test_create(registry, handlers, resource, cause_mock,
                      caplog, assert_logs, k8s_mocked):
    # CREATE cause: only the create handler runs, at least one k8s event is
    # posted, and one patch stores the last-seen state and resets progress.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = CREATE
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert handlers.create_mock.call_count == 1
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert k8s_mocked.asyncio_sleep.call_count == 0
    assert k8s_mocked.post_event.call_count >= 1
    assert k8s_mocked.patch_obj.call_count == 1
    # The patch must record the last-seen annotation and clear the progress
    # tracker once all handlers are done.
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in patch
    assert 'annotations' in patch['metadata']
    assert LAST_SEEN_ANNOTATION in patch['metadata']['annotations']
    assert 'status' in patch
    assert 'kopf' in patch['status']
    assert 'progress' in patch['status']['kopf']
    assert patch['status']['kopf']['progress'] is None  # 1 out of 1 handlers done
    assert_logs([
        "Creation event:",
        "Invoking handler 'create_fn'",
        "Handler 'create_fn' succeeded",
        "All handlers succeeded",
        "Patching with",
    ])
async def test_update(registry, handlers, resource, cause_mock,
                      caplog, assert_logs, k8s_mocked):
    # UPDATE cause: only the update handler runs, at least one k8s event is
    # posted, and one patch stores the last-seen state and resets progress.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = UPDATE
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert handlers.update_mock.call_count == 1
    assert not handlers.delete_mock.called
    assert k8s_mocked.asyncio_sleep.call_count == 0
    assert k8s_mocked.post_event.call_count >= 1
    assert k8s_mocked.patch_obj.call_count == 1
    # The patch must record the last-seen annotation and clear the progress
    # tracker once all handlers are done.
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in patch
    assert 'annotations' in patch['metadata']
    assert LAST_SEEN_ANNOTATION in patch['metadata']['annotations']
    assert 'status' in patch
    assert 'kopf' in patch['status']
    assert 'progress' in patch['status']['kopf']
    assert patch['status']['kopf']['progress'] is None  # 1 out of 1 handlers done
    assert_logs([
        "Update event:",
        "Invoking handler 'update_fn'",
        "Handler 'update_fn' succeeded",
        "All handlers succeeded",
        "Patching with",
    ])
async def test_delete(registry, handlers, resource, cause_mock,
                      caplog, assert_logs, k8s_mocked):
    # DELETE cause: only the delete handler runs, at least one k8s event is
    # posted, and one patch clears progress (the finalizer removal is
    # asserted via the logs).
    caplog.set_level(logging.DEBUG)
    cause_mock.event = DELETE
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert handlers.delete_mock.call_count == 1
    assert k8s_mocked.asyncio_sleep.call_count == 0
    assert k8s_mocked.post_event.call_count >= 1
    assert k8s_mocked.patch_obj.call_count == 1
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'status' in patch
    assert 'kopf' in patch['status']
    assert 'progress' in patch['status']['kopf']
    assert patch['status']['kopf']['progress'] is None  # 1 out of 1 handlers done
    assert_logs([
        "Deletion event",
        "Invoking handler 'delete_fn'",
        "Handler 'delete_fn' succeeded",
        "All handlers succeeded",
        "Removing the finalizer",
        "Patching with",
    ])
#
# Informational causes: just log, and do nothing else.
#
async def test_gone(registry, handlers, resource, cause_mock,
                    caplog, assert_logs, k8s_mocked):
    # GONE cause (informational): the object is fully deleted — nothing runs,
    # nothing is posted or patched; only a log line is emitted.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = GONE
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not k8s_mocked.asyncio_sleep.called
    assert not k8s_mocked.post_event.called
    assert not k8s_mocked.patch_obj.called
    assert_logs([
        "Deleted, really deleted",
    ])
async def test_free(registry, handlers, resource, cause_mock,
                    caplog, assert_logs, k8s_mocked):
    # FREE cause (informational): deletion already handled — nothing runs,
    # nothing is posted or patched; only a log line is emitted.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = FREE
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not k8s_mocked.asyncio_sleep.called
    assert not k8s_mocked.post_event.called
    assert not k8s_mocked.patch_obj.called
    assert_logs([
        "Deletion event, but we are done with it",
    ])
async def test_noop(registry, handlers, resource, cause_mock,
                    caplog, assert_logs, k8s_mocked):
    # NOOP cause (informational): an irrelevant change — nothing runs,
    # nothing is posted or patched; only a log line is emitted.
    caplog.set_level(logging.DEBUG)
    cause_mock.event = NOOP
    await custom_object_handler(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        resource=resource,
        event={'type': 'irrelevant', 'object': cause_mock.body},
        freeze=asyncio.Event(),
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not k8s_mocked.asyncio_sleep.called
    assert not k8s_mocked.post_event.called
    assert not k8s_mocked.patch_obj.called
    assert_logs([
        "Something has changed, but we are not interested",
    ])
| 31.142259
| 82
| 0.681177
| 930
| 7,443
| 5.245161
| 0.111828
| 0.059041
| 0.058426
| 0.050636
| 0.834358
| 0.822263
| 0.820008
| 0.814268
| 0.814268
| 0.814268
| 0
| 0.010503
| 0.219669
| 7,443
| 238
| 83
| 31.273109
| 0.829373
| 0.017063
| 0
| 0.737968
| 0
| 0
| 0.130114
| 0
| 0
| 0
| 0
| 0
| 0.411765
| 1
| 0
| false
| 0
| 0.037433
| 0
| 0.037433
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8b0482aa55281b3c2e8aa6e9f6398bb15e2278a1
| 16,859
|
py
|
Python
|
pyradox/convnets/resnet.py
|
p4vv37/pyradox
|
cfc8c07d637a1cc189dd8d200f8a55d00405b81f
|
[
"MIT"
] | 61
|
2021-01-10T09:31:32.000Z
|
2022-02-13T13:30:48.000Z
|
pyradox/convnets/resnet.py
|
p4vv37/pyradox
|
cfc8c07d637a1cc189dd8d200f8a55d00405b81f
|
[
"MIT"
] | 1
|
2021-04-24T12:03:19.000Z
|
2021-04-24T12:03:19.000Z
|
pyradox/convnets/resnet.py
|
p4vv37/pyradox
|
cfc8c07d637a1cc189dd8d200f8a55d00405b81f
|
[
"MIT"
] | 6
|
2021-01-17T16:17:35.000Z
|
2022-02-13T13:30:49.000Z
|
import math, copy
from functools import reduce
from tensorflow.keras import layers
from pyradox.modules import *
from tensorflow.keras.activations import swish
from tensorflow.nn import relu6
def relu(x):
    """Apply a standard ReLU activation to ``x``."""
    activation = layers.ReLU()
    return activation(x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: relu6(x + 3) / 6."""
    clipped = layers.ReLU(6.0)(x + 3.0)
    return clipped * (1.0 / 6.0)
def hard_swish(x):
    """Hard-swish activation: elementwise ``hard_sigmoid(x) * x``."""
    gate = hard_sigmoid(x)
    return layers.Multiply()([gate, x])
class ResNet(layers.Layer):
    """Customized implementation of ResNet.

    Builds the classic ResNet stem (7x7/2 convolution, batch norm,
    activation, 3x3/2 max pooling) followed by the stacked residual blocks
    described by ``resnet_config``.

    Args:
        resnet_config (list of tuples of 2 int): per stack, the filters of the
            bottleneck layer in a block and the number of blocks in the stack
        epsilon (float): small float added to variance to avoid dividing by
            zero in batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(
        self,
        resnet_config,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs,
    ):
        super().__init__()
        self.resnet_config = resnet_config
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs

    def __call__(self, inputs):
        # Stem: pad -> 7x7/2 conv -> BN -> activation -> pad -> 3x3/2 maxpool.
        x = inputs
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
        x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.MaxPooling2D(3, strides=2)(x)
        for i, (filters, blocks) in enumerate(self.resnet_config):
            if i == 0:
                # First stack: leading block uses an explicit stride of 2.
                # NOTE(review): reference Keras ResNet uses stride 1 for the
                # first stack since the max-pool already downsamples —
                # confirm this stride is intentional.
                x = ResNetBlock(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    stride=2,
                    **self.kwargs,
                )(x)
                # Remaining blocks of the stack use the identity shortcut.
                for _ in range(2, blocks + 1):
                    x = ResNetBlock(
                        filters=filters,
                        epsilon=self.epsilon,
                        activation=self.activation,
                        use_bias=self.use_bias,
                        conv_shortcut=False,
                        **self.kwargs,
                    )(x)
            else:
                # Later stacks: leading block with ResNetBlock's default
                # stride, then identity-shortcut blocks.
                x = ResNetBlock(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    **self.kwargs,
                )(x)
                for _ in range(2, blocks + 1):
                    x = ResNetBlock(
                        filters=filters,
                        epsilon=self.epsilon,
                        activation=self.activation,
                        use_bias=self.use_bias,
                        conv_shortcut=False,
                        **self.kwargs,
                    )(x)
        return x
class ResNet50(ResNet):
    """ResNet-50: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 6), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 6), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNet101(ResNet):
    """ResNet-101: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 23), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 23), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNet152(ResNet):
    """ResNet-152: stacks of (filters, blocks) = (64, 3), (128, 8), (256, 36), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 8), (256, 36), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNetV2(layers.Layer):
    """Customized implementation of ResNetV2 (pre-activation ResNet).

    Builds the stem (7x7/2 convolution, 3x3/2 max pooling) followed by the
    stacked V2 residual blocks described by ``resnet_config``, with a final
    batch norm + activation after all stacks (V2 style).

    Args:
        resnet_config (list of tuples of 2 int): per stack, the filters of the
            bottleneck layer in a block and the number of blocks in the stack
        epsilon (float): small float added to variance to avoid dividing by
            zero in batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(
        self,
        resnet_config,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs,
    ):
        super().__init__()
        self.resnet_config = resnet_config
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs

    def __call__(self, inputs):
        # Stem: pad -> 7x7/2 conv -> pad -> 3x3/2 maxpool. No BN/activation
        # here — V2 blocks pre-activate internally.
        x = inputs
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
        x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.MaxPooling2D(3, strides=2)(x)
        for i, (filters, blocks) in enumerate(self.resnet_config):
            if i == len(self.resnet_config) - 1:
                # Last stack: closing block keeps stride 1 (no downsampling
                # at the end of the network).
                x = ResNetV2Block(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    **self.kwargs,
                )(x)
                for _ in range(2, blocks + 1):
                    x = ResNetV2Block(
                        filters=filters,
                        epsilon=self.epsilon,
                        activation=self.activation,
                        use_bias=self.use_bias,
                        conv_shortcut=False,
                        **self.kwargs,
                    )(x)
                x = ResNetV2Block(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    stride=1,
                    **self.kwargs,
                )(x)
            else:
                # Earlier stacks: identity-shortcut blocks, then a closing
                # block with stride 2 that downsamples for the next stack.
                x = ResNetV2Block(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    **self.kwargs,
                )(x)
                for _ in range(2, blocks + 1):
                    x = ResNetV2Block(
                        filters=filters,
                        epsilon=self.epsilon,
                        activation=self.activation,
                        use_bias=self.use_bias,
                        conv_shortcut=False,
                        **self.kwargs,
                    )(x)
                x = ResNetV2Block(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    stride=2,
                    **self.kwargs,
                )(x)
        # Final pre-activation-style BN + activation after all stacks.
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        return x
class ResNet50V2(ResNetV2):
    """ResNet-50V2: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 6), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 6), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNet101V2(ResNetV2):
    """ResNet-101V2: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 23), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 23), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNet152V2(ResNetV2):
    """ResNet-152V2: stacks of (filters, blocks) = (64, 3), (128, 8), (256, 36), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 8), (256, 36), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNeXt(layers.Layer):
    """Customized implementation of ResNeXt.

    Builds the ResNet-style stem (7x7/2 convolution, batch norm, activation,
    3x3/2 max pooling) followed by the stacked ResNeXt blocks described by
    ``resnet_config``.

    Args:
        resnet_config (list of tuples of 2 int): per stack, the filters of the
            bottleneck layer in a block and the number of blocks in the stack
        epsilon (float): small float added to variance to avoid dividing by
            zero in batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(
        self,
        resnet_config,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs,
    ):
        super().__init__()
        self.resnet_config = resnet_config
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs

    def __call__(self, inputs):
        # Stem: pad -> 7x7/2 conv -> BN -> activation -> pad -> 3x3/2 maxpool.
        x = inputs
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
        x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.MaxPooling2D(3, strides=2)(x)
        # Every stack: leading block with stride 2, then identity-shortcut
        # blocks for the rest of the stack.
        for i, (filters, blocks) in enumerate(self.resnet_config):
            x = ResNeXtBlock(
                filters=filters,
                epsilon=self.epsilon,
                activation=self.activation,
                use_bias=self.use_bias,
                stride=2,
                **self.kwargs,
            )(x)
            for _ in range(2, blocks + 1):
                x = ResNeXtBlock(
                    filters=filters,
                    epsilon=self.epsilon,
                    activation=self.activation,
                    use_bias=self.use_bias,
                    conv_shortcut=False,
                    **self.kwargs,
                )(x)
        return x
class ResNeXt50(ResNeXt):
    """ResNeXt-50: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 6), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 6), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNeXt101(ResNeXt):
    """ResNeXt-101: stacks of (filters, blocks) = (64, 3), (128, 4), (256, 23), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 4), (256, 23), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
class ResNeXt152(ResNeXt):
    """ResNeXt-152: stacks of (filters, blocks) = (64, 3), (128, 8), (256, 36), (512, 3).

    Args:
        epsilon (float): small constant added to variance in batch
            normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch
            normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector,
            default: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """

    def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
        stacks = [(64, 3), (128, 8), (256, 36), (512, 3)]
        super().__init__(
            resnet_config=stacks,
            epsilon=epsilon,
            activation=activation,
            use_bias=use_bias,
            **kwargs,
        )
| 38.490868
| 121
| 0.538229
| 1,728
| 16,859
| 5.121528
| 0.069444
| 0.061695
| 0.016271
| 0.04339
| 0.906893
| 0.892203
| 0.892203
| 0.892203
| 0.892203
| 0.892203
| 0
| 0.038516
| 0.371671
| 16,859
| 438
| 122
| 38.490868
| 0.796941
| 0.377543
| 0
| 0.866667
| 0
| 0
| 0.004765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.022222
| 0.011111
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c67bc9d5f4a90321154e47a5d2e59a76c24ffdd
| 60
|
py
|
Python
|
addons/account_edi_ubl/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/account_edi_ubl/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/account_edi_ubl/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
from . import account_move
from . import account_edi_format
| 20
| 32
| 0.833333
| 9
| 60
| 5.222222
| 0.666667
| 0.425532
| 0.723404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 60
| 2
| 33
| 30
| 0.903846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
506cfa3fd4face5da219e3b07b73401295f1685f
| 21,168
|
py
|
Python
|
dataloaders/dm_loader.py
|
iliasprc/MScThesis
|
dcfd70691ebb5c99c467d69d0946fe33d4d4d3e7
|
[
"MIT",
"Unlicense"
] | null | null | null |
dataloaders/dm_loader.py
|
iliasprc/MScThesis
|
dcfd70691ebb5c99c467d69d0946fe33d4d4d3e7
|
[
"MIT",
"Unlicense"
] | null | null | null |
dataloaders/dm_loader.py
|
iliasprc/MScThesis
|
dcfd70691ebb5c99c467d69d0946fe33d4d4d3e7
|
[
"MIT",
"Unlicense"
] | null | null | null |
import os
import random
from random import randint
import torch
from torch.utils.data import Dataset
from .utils.utils import read_data_, read_fidpnn_dataset
train_prefix = "train"
dev_prefix = "val"
test_prefix = "test"
class DMshort(Dataset):
    """IDP seq2seq dataset that truncates items to the first 1022 residues.

    Returns (x, y) pairs: x is the raw amino-acid string when
    ``config.dataset.use_strings`` is set (for an external string embedder),
    otherwise a LongTensor of per-residue vocabulary ids; y is a LongTensor
    of the per-residue integer labels.  In SSL mode x is returned as its own
    reconstruction target.
    """

    def __init__(self, config, mode):
        # Data paths are relative to config.cwd; no test split is wired up.
        train_filepath = "data/idp_seq_2_seq/train/all_train.txt"
        dev_filepath = "data/idp_seq_2_seq/validation/all_valid.txt"
        test_filepath = ""
        cwd = config.cwd
        if mode == train_prefix:
            self.names, self.annotations, self.proteins, self.classes, self.w2i = read_data_(
                os.path.join(cwd, train_filepath))
            self.augment = True
            self.mode = mode
        elif mode == dev_prefix:
            self.names, self.annotations, self.proteins, _, _ = read_data_(
                os.path.join(cwd, dev_filepath))
            self.mode = mode
            self.augment = False
        # A fixed 22-letter alphabet overwrites whatever read_data_ returned,
        # so train and validation share one consistent residue -> id mapping.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U',
                        'V', 'W', 'X', 'Y']
        indixes = list(range(len(self.classes)))
        print(self.classes)
        self.w2i = dict(zip(self.classes, indixes))
        self.ssl = config.dataset.type == 'SSL'
        self.use_elmo = config.dataset.use_strings
        if self.use_elmo:
            print('\n USE ELMO \n')
            # (commented-out ElmoEmbedder setup removed for readability)
        if self.ssl:
            print('\n SELF-SUPERVISED\n')
        else:
            print('\nIDP fully-supervised\n')

    def __len__(self):
        # Number of protein sequences loaded for this split.
        return len(self.proteins)

    def __getitem__(self, index):
        # (A fully commented-out random-crop variant of this method was
        # removed here; only the note remains.)
        if self.mode == 'train' and self.augment:
            # Random crop bounds are computed but never applied (the slicing
            # is commented out), so left/right are currently unused.
            L = len(self.proteins[index])
            left = randint(0, L // 4)
            right = randint(L // 2, L)
            x = [self.w2i[amino] for amino in self.proteins[index]]  # [left:right]
            if self.use_elmo:
                # Raw string, truncated to 1022 residues, for the embedder.
                x = self.proteins[index][:1022]
            else:
                # NOTE(review): x is NOT truncated to 1022 in this branch
                # while y below is — lengths can disagree for long proteins;
                # confirm intent.
                x = torch.LongTensor(x)
            y = [int(i) for i in self.annotations[index][:1022]]  # [left:right]
            y = torch.LongTensor(y)  # .unsqueeze(-1)
            if self.ssl:
                # Self-supervised: input doubles as the target.
                return x, x
            return x, y  # 1
        # Evaluation / non-augmented path.
        x = [self.w2i[amino] for amino in self.proteins[index]]
        y = [int(i) for i in self.annotations[index]]
        if self.use_elmo:
            seq = self.proteins[index][:1022]
            x = seq
        else:
            x = torch.LongTensor(x)
        y = torch.LongTensor([int(i) for i in self.annotations[index]][:1022])  # .unsqueeze(-1)
        # NOTE(review): y1 is computed but never used (and one_hot raises if
        # labels fall outside {0, 1}); left as-is to preserve behavior.
        y1 = torch.nn.functional.one_hot(y, num_classes=2)
        if self.ssl:
            return x, x
        return x, y  # 1
class DMLoader(Dataset):
    """IDP seq2seq dataset serving full-length (untruncated) sequences.

    Same structure as DMshort, but without the 1022-residue truncation.
    Returns (x, y) pairs: x is the raw amino-acid string when
    ``config.dataset.use_strings`` is set, otherwise a LongTensor of
    vocabulary ids; y is a LongTensor of per-residue integer labels.
    In SSL mode x is returned as its own target.
    """

    def __init__(self, config, mode):
        # Data paths are relative to config.cwd; no test split is wired up.
        train_filepath = "data/idp_seq_2_seq/train/all_train.txt"
        dev_filepath = "data/idp_seq_2_seq/validation/all_valid.txt"
        test_filepath = ""
        cwd = config.cwd
        if mode == train_prefix:
            self.names, self.annotations, self.proteins, self.classes, self.w2i = read_data_(
                os.path.join(cwd, train_filepath))
            self.augment = True
            self.mode = mode
        elif mode == dev_prefix:
            self.names, self.annotations, self.proteins, _, _ = read_data_(
                os.path.join(cwd, dev_filepath))
            self.mode = mode
            self.augment = False
        # Fixed 22-letter alphabet overwrites the vocabulary from read_data_
        # so both splits share one residue -> id mapping.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U',
                        'V', 'W', 'X', 'Y']
        indixes = list(range(len(self.classes)))
        print(self.classes)
        self.w2i = dict(zip(self.classes, indixes))
        self.ssl = config.dataset.type == 'SSL'
        self.use_elmo = config.dataset.use_strings
        if self.use_elmo:
            print('\n USE ELMO \n')
            # (commented-out ElmoEmbedder setup removed for readability)
        if self.ssl:
            print('\n SELF-SUPERVISED\n')
        else:
            print('\nIDP fully-supervised\n')

    def __len__(self):
        # Number of protein sequences loaded for this split.
        return len(self.proteins)

    def __getitem__(self, index):
        # (A fully commented-out random-crop variant of this method was
        # removed here; only the note remains.)
        if self.mode == 'train' and self.augment:
            # Random crop bounds are computed but never applied (the slicing
            # is commented out), so left/right are currently unused.
            L = len(self.proteins[index])
            left = randint(0, L // 4)
            right = randint(L // 2, L)
            x = [self.w2i[amino] for amino in self.proteins[index]]  # [left:right]
            if self.use_elmo:
                # Raw sequence string for the external embedder.
                x = self.proteins[index]
            else:
                x = torch.LongTensor(x)
            y = [int(i) for i in self.annotations[index]]  # [left:right]
            y = torch.LongTensor(y)  # .unsqueeze(-1)
            if self.ssl:
                # Self-supervised: input doubles as the target.
                return x, x
            return x, y  # 1
        # Evaluation / non-augmented path.
        x = [self.w2i[amino] for amino in self.proteins[index]]
        y = [int(i) for i in self.annotations[index]]
        if self.use_elmo:
            seq = self.proteins[index]
            x = seq
        else:
            x = torch.LongTensor(x)
        y = torch.LongTensor([int(i) for i in self.annotations[index]])  # .unsqueeze(-1)
        # NOTE(review): y1 is computed but never used (and one_hot raises if
        # labels fall outside {0, 1}); left as-is to preserve behavior.
        y1 = torch.nn.functional.one_hot(y, num_classes=2)
        if self.ssl:
            return x, x
        return x, y  # 1
class MXD494Loader(Dataset):
    """MXD494 disorder dataset.

    Items are (x, y) pairs where x is always the raw amino-acid string
    (see the hard-coded ``if True:`` branches below) and y is a LongTensor
    of per-residue integer labels.  In SSL mode x is its own target.
    """

    def __init__(self, config, mode):
        # These shadow the module-level prefix constants with equal values.
        train_prefix = "train"
        dev_prefix = "val"
        test_prefix = "test"
        train_filepath = "data/idp_seq_2_seq/mxd494/MXD494_train_all.txt"
        test_filepath = "data/idp_seq_2_seq/mxd494/MXD494.txt"
        # Validation and test deliberately share the same file here.
        dev_filepath = test_filepath
        cwd = config.cwd
        if mode == train_prefix:
            self.names, self.annotations, self.proteins, self.classes, self.w2i = read_data_(
                os.path.join(cwd, train_filepath))
            self.augment = True
            self.mode = mode
        elif mode == dev_prefix:
            self.names, self.annotations, self.proteins, self.classes, self.w2i = read_data_(
                os.path.join(cwd, dev_filepath))
            self.mode = mode
            self.augment = False
        # Fixed 22-letter alphabet overwrites the vocabulary from read_data_.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
                        'W', 'X', 'Y']
        indixes = list(range(len(self.classes)))
        print(self.classes)
        self.w2i = dict(zip(self.classes, indixes))
        self.ssl = config.dataset.type == 'SSL'
        # (commented-out use_elmo / ElmoEmbedder setup removed for
        # readability; this class always returns raw strings — see below)
        if self.ssl:
            print('\n SELF-SUPERVISED\n')
        else:
            print('\nIDP fully-supervised\n')

    def __len__(self):
        # Number of protein sequences loaded for this split.
        return len(self.proteins)

    def __getitem__(self, index):
        # (A fully commented-out random-crop variant of this method was
        # removed here; only the note remains.)
        # NOTE(review): unlike the sibling loaders, the "augment" gate fires
        # randomly (~30% of train items) and ignores self.augment.
        if self.mode == 'train' and random.uniform(0, 1) > 0.7:
            # Crop bounds are computed but never applied — left/right unused.
            L = len(self.proteins[index])
            left = randint(0, L // 4)
            right = randint(L // 2, L)
            x = [self.w2i[amino] for amino in self.proteins[index]]  # [left:right]
            # NOTE(review): `if True:` hard-codes the raw-string path; the
            # LongTensor else-branch is unreachable.
            if True:
                x = self.proteins[index]  # [left:right]
            else:
                x = torch.LongTensor(x)
            y = [int(i) for i in self.annotations[index]]  # [left:right]
            y = torch.LongTensor(y)  # .unsqueeze(-1)
            if self.ssl:
                return x, x
            return x, y  # 1
        # Non-augmented path.
        x = [self.w2i[amino] for amino in self.proteins[index]]
        y = [int(i) for i in self.annotations[index]]
        # Same hard-coded raw-string path as above.
        if True:
            x = self.proteins[index]
        else:
            x = torch.LongTensor(x)
        y = torch.LongTensor([int(i) for i in self.annotations[index]])  # .unsqueeze(-1)
        # NOTE(review): y1 is computed but never used (and one_hot raises if
        # labels fall outside {0, 1}); left as-is to preserve behavior.
        y1 = torch.nn.functional.one_hot(y, num_classes=2)
        if self.ssl:
            return x, x
        return x, y  # 1
class Disorder723(Dataset):
    """Disorder723 dataset.

    Items are (x, y) pairs where x is always the raw amino-acid string
    (see the hard-coded ``if True:`` branches below) and y is a LongTensor
    of per-residue integer labels.  In SSL mode x is its own target.
    """

    def __init__(self, config, mode):
        # Paths relative to config.cwd; no separate test split configured.
        train_filepath = "data/idp_seq_2_seq/disorder723/train_723.txt"
        dev_filepath = "data/idp_seq_2_seq/disorder723/disorder723.txt"
        test_filepath = ""
        cwd = config.cwd
        if mode == train_prefix:
            self.names, self.annotations, self.proteins, self.classes, self.w2i = read_data_(
                os.path.join(cwd, train_filepath))
            self.augment = True
            self.mode = mode
        elif mode == dev_prefix:
            self.names, self.annotations, self.proteins, _, _ = read_data_(
                os.path.join(cwd, dev_filepath))
            self.mode = mode
            self.augment = False
        # Fixed 22-letter alphabet overwrites the vocabulary from read_data_.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U',
                        'V', 'W', 'X', 'Y']
        indixes = list(range(len(self.classes)))
        print(self.classes)
        self.w2i = dict(zip(self.classes, indixes))
        self.ssl = config.dataset.type == 'SSL'
        # (commented-out use_elmo / ElmoEmbedder setup removed for
        # readability; this class always returns raw strings — see below)

    def __len__(self):
        # Number of protein sequences loaded for this split.
        return len(self.proteins)

    def __getitem__(self, index):
        # (A fully commented-out random-crop variant of this method was
        # removed here; only the note remains.)
        if self.mode == 'train' and self.augment:
            # Crop bounds are computed but never applied — left/right unused.
            L = len(self.proteins[index])
            left = randint(0, L // 4)
            right = randint(L // 2, L)
            x = [self.w2i[amino] for amino in self.proteins[index]]  # [left:right]
            # NOTE(review): `if True:` hard-codes the raw-string path; the
            # LongTensor else-branch is unreachable.
            if True:
                x = self.proteins[index]
            else:
                x = torch.LongTensor(x)
            y = [int(i) for i in self.annotations[index]]  # [left:right]
            y = torch.LongTensor(y)  # .unsqueeze(-1)
            if self.ssl:
                return x, x
            return x, y  # 1
        # Non-augmented path.
        x = [self.w2i[amino] for amino in self.proteins[index]]
        y = [int(i) for i in self.annotations[index]]
        # Same hard-coded raw-string path as above.
        if True:
            seq = self.proteins[index]
            x = seq
        else:
            x = torch.LongTensor(x)
        y = torch.LongTensor([int(i) for i in self.annotations[index]])  # .unsqueeze(-1)
        # NOTE(review): y1 is computed but never used (and one_hot raises if
        # labels fall outside {0, 1}); left as-is to preserve behavior.
        y1 = torch.nn.functional.one_hot(y, num_classes=2)
        if self.ssl:
            return x, x
        return x, y  # 1
class CAID2018_Disprot(Dataset):
    """CAID-2018 DisProt disorder evaluation set.

    Loads the disprot-disorder annotations once and serves
    (sequence, per-residue label) pairs.  x is the raw amino-acid string
    when ``config.dataset.use_elmo`` is set, otherwise a LongTensor of
    vocabulary ids; y is always a LongTensor of integer labels.
    """

    def __init__(self, config, mode):
        dev_filepath = "data/CAID_data_2018/disprot-disorder.txt"
        test_filepath = ""
        cwd = config.cwd
        self.names, self.annotations, self.proteins, _, _ = read_data_(
            os.path.join(cwd, dev_filepath))
        self.mode = mode
        self.augment = False
        # Fixed 22-letter residue alphabet; list position = integer id.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U',
                        'V', 'W', 'X', 'Y']
        print(self.classes)
        self.w2i = {letter: position for position, letter in enumerate(self.classes)}
        self.ssl = config.dataset.type == 'SSL'
        self.use_elmo = config.dataset.use_elmo
        if self.use_elmo:
            print('\n USE ELMO \n')
        if self.ssl:
            print('\n SELF-SUPERVISED\n')
        else:
            print('\nIDP fully-supervised\n')

    def __len__(self):
        return len(self.proteins)

    def __getitem__(self, index):
        protein = self.proteins[index]
        # Always map residues to ids so unknown residues fail loudly,
        # regardless of which input representation is served.
        residue_ids = [self.w2i[amino] for amino in protein]
        if self.use_elmo:
            # Raw string for an external string embedder.
            x = protein
        else:
            x = torch.LongTensor(residue_ids)
        y = torch.LongTensor([int(label) for label in self.annotations[index]])
        return x, y
class FidpnnLoader(Dataset):
    """flDPnn disorder dataset (train / validation / dissimilar-test splits).

    Yields (x, y) pairs where x is the raw amino-acid string when
    ``config.dataset.use_elmo`` is set, otherwise a LongTensor of per-residue
    vocabulary ids, and y is a LongTensor of per-residue integer labels.
    In SSL mode the input is returned as its own target.
    """

    def __init__(self, config, mode):
        train_filepath = "data/fidpnn_data/flDPnn_Training_Annotation.txt"
        dev_filepath = "data/fidpnn_data/flDPnn_Validation_Annotation.txt"
        test_filepath = "data/fidpnn_data/flDPnn_DissimiTest_Annotation.txt"
        cwd = config.cwd
        if mode == train_prefix:
            self.names, self.proteins, self.annotations = read_fidpnn_dataset(
                os.path.join(cwd, train_filepath))
            self.augment = True
            self.mode = mode
        elif mode == dev_prefix:
            self.names, self.proteins, self.annotations = read_fidpnn_dataset(
                os.path.join(cwd, dev_filepath))
            self.mode = mode
            self.augment = False
        elif mode == test_prefix:
            # BUG FIX: the test split previously loaded dev_filepath, so the
            # dissimilar-test annotation file was never used.
            self.names, self.proteins, self.annotations = read_fidpnn_dataset(
                os.path.join(cwd, test_filepath))
            self.mode = mode
            self.augment = False
        # Fixed 22-letter residue alphabet; list position = integer id.
        self.classes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U',
                        'V', 'W', 'X', 'Y']
        indixes = list(range(len(self.classes)))
        print(self.classes)
        self.w2i = dict(zip(self.classes, indixes))
        self.ssl = config.dataset.type == 'SSL'
        self.use_strings = config.dataset.use_elmo
        if self.use_strings:
            print('\n USE ELMO \n')
        if self.ssl:
            print('\n SELF-SUPERVISED\n')
        else:
            print('\nIDP fully-supervised\n')

    def __len__(self):
        # Number of proteins in the selected split.
        return len(self.proteins)

    def __getitem__(self, index):
        # NOTE: the original "augment" branch computed random crop bounds but
        # never applied them, so train and eval items were built identically;
        # that dead crop code has been removed.
        if self.use_strings:
            # Raw sequence string for an external string embedder.
            x = self.proteins[index]
        else:
            x = torch.LongTensor([self.w2i[amino] for amino in self.proteins[index]])
        y = torch.LongTensor([int(i) for i in self.annotations[index]])
        if self.ssl:
            # Self-supervised: the input is its own target.
            return x, x
        return x, y
| 38.627737
| 118
| 0.516204
| 2,670
| 21,168
| 3.986517
| 0.053933
| 0.065389
| 0.063886
| 0.015784
| 0.961575
| 0.951334
| 0.940436
| 0.931417
| 0.919203
| 0.906332
| 0
| 0.018012
| 0.336451
| 21,168
| 547
| 119
| 38.698355
| 0.739784
| 0.279006
| 0
| 0.905488
| 0
| 0
| 0.066095
| 0.034542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054878
| false
| 0
| 0.018293
| 0.018293
| 0.17378
| 0.060976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50782f9a25e8be1f91f9019869fc170a18eed6f0
| 3,395
|
py
|
Python
|
monitor/anomaly_detection/task/metric_task.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | 1
|
2021-12-22T08:31:07.000Z
|
2021-12-22T08:31:07.000Z
|
monitor/anomaly_detection/task/metric_task.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | null | null | null |
monitor/anomaly_detection/task/metric_task.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | 3
|
2021-12-16T13:55:57.000Z
|
2022-02-24T09:53:49.000Z
|
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
import os
import subprocess
from utils import unify_byte_unit
def cpu_usage():
    """Return the CPU-usage field of the gaussdb process from ``ps -ux``.

    Pipes ``ps -ux`` through ``grep 'gaussd[b]'`` (the bracket trick keeps
    grep from matching its own command line).  Returns the third
    whitespace-separated field of the matching line as a decoded string,
    or the float 0.0 when no gaussdb process is found.
    NOTE(review): the return type is therefore str-or-float — callers must
    cope with both.
    """
    ps_proc = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
    grep_proc = subprocess.Popen(['grep', 'gaussd[b]'], stdin=ps_proc.stdout, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, shell=False)
    stdout_data, _ = grep_proc.communicate()
    if stdout_data:
        return stdout_data.split()[2].decode('utf-8')
    return 0.0
def io_read():
    """Return the read-I/O field of the gaussdb process from ``pidstat -d``.

    Returns the fourth whitespace-separated field (index 3) of the matching
    line as a decoded string, or the float 0.0 when no gaussdb process is
    found.
    """
    pidstat_proc = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False)
    grep_proc = subprocess.Popen(['grep', 'gaussd[b]'], stdin=pidstat_proc.stdout, stdout=subprocess.PIPE,
                                 shell=False)
    stdout_data, _ = grep_proc.communicate()
    if stdout_data:
        return stdout_data.split()[3].decode('utf-8')
    return 0.0
def io_write():
    """Return the write-I/O field of the gaussdb process from ``pidstat -d``.

    Returns the fifth whitespace-separated field (index 4) of the matching
    line as a decoded string, or the float 0.0 when no gaussdb process is
    found.
    """
    pidstat_proc = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False)
    grep_proc = subprocess.Popen(['grep', 'gaussd[b]'], stdin=pidstat_proc.stdout, stdout=subprocess.PIPE,
                                 shell=False)
    stdout_data, _ = grep_proc.communicate()
    if stdout_data:
        return stdout_data.split()[4].decode('utf-8')
    return 0.0
def memory_usage():
    """Return the memory-usage field of the gaussdb process from ``ps -ux``.

    Returns the fourth whitespace-separated field (index 3) of the matching
    line as a decoded string, or the float 0.0 when no gaussdb process is
    found.
    """
    ps_proc = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, shell=False)
    grep_proc = subprocess.Popen(['grep', 'gaussd[b]'], stdin=ps_proc.stdout, stdout=subprocess.PIPE,
                                 shell=False)
    stdout_data, _ = grep_proc.communicate()
    if stdout_data:
        return stdout_data.split()[3].decode('utf-8')
    return 0.0
def disk_space():
    """Return the disk space used by the openGauss data directory.

    Resolves $PGDATA, runs ``du -sh`` on it, and converts the result via
    unify_byte_unit.  Returns 0.0 when ``du`` produced no output.

    Raises:
        ValueError: if PGDATA is unset, or if ``du`` wrote to stderr.
    """
    pg_data = os.getenv('PGDATA')
    if pg_data is None:
        raise ValueError('not found PGDATA in environment.')
    pg_data = os.path.realpath(pg_data)
    # BUG FIX: stderr was not captured (stderr=None), so communicate()[1]
    # was always None and the error check below could never fire; capture
    # stderr and raise only when it actually contains output.
    child = subprocess.Popen(['du', '-sh', pg_data], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=False)
    sub_chan = child.communicate()
    if sub_chan[1]:
        raise ValueError('error when get disk usage of openGauss: {error}'.
                         format(error=sub_chan[1].decode('utf-8')))
    if not sub_chan[0]:
        return 0.0
    return unify_byte_unit(sub_chan[0].decode('utf-8'))
| 34.292929
| 110
| 0.658616
| 482
| 3,395
| 4.574689
| 0.273859
| 0.053968
| 0.036281
| 0.097959
| 0.838095
| 0.828118
| 0.81678
| 0.786848
| 0.781406
| 0.745125
| 0
| 0.024868
| 0.218262
| 3,395
| 98
| 111
| 34.642857
| 0.805953
| 0
| 0
| 0.457143
| 0
| 0
| 0.071181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.042857
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
508349b5346a9b794452b1bbb4120c27b9998d18
| 5,159
|
py
|
Python
|
dhl_delivery/dhl_process.py
|
ivangsm/dhl_delivery
|
aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21
|
[
"MIT"
] | null | null | null |
dhl_delivery/dhl_process.py
|
ivangsm/dhl_delivery
|
aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21
|
[
"MIT"
] | null | null | null |
dhl_delivery/dhl_process.py
|
ivangsm/dhl_delivery
|
aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21
|
[
"MIT"
] | null | null | null |
import time
from datetime import datetime
import multiprocessing
import dhl_delivery
import dhl_delivery.config
import urllib.parse
import urllib.request
class DhlProcess:
    """Posts XML payloads to the DHL API, each call guarded by a watchdog.

    Every public ``process_*`` method spawns the API call in its own
    subprocess alongside a sanity-check job and a watchdog that terminates
    the API call once ``dhl_delivery.max_response_time`` elapses.  Results
    are communicated back through a multiprocessing.Manager dict.
    """

    def __init__(self):
        pass

    @staticmethod
    def check_utilities(n, return_dict):
        # Sentinel job: proves the multiprocessing machinery round-trips.
        return_dict['return_check_utilities'] = True

    @staticmethod
    def stop_process(p, start_time, time_to_stop_in_second):
        """Busy-wait until the allotted time elapses, then terminate ``p``."""
        start_this = time.time()
        while time.time() < start_this + time_to_stop_in_second:
            delta = datetime.now() - start_time
            if delta.total_seconds() >= time_to_stop_in_second:
                try:
                    p.terminate()
                except Exception:
                    # BUG FIX: was a bare `except:` (also swallowed
                    # SystemExit/KeyboardInterrupt).  The process may already
                    # have exited; repeated terminate() is harmless.
                    pass

    def _run_with_watchdog(self, api_target, api_name, xml_formated_data_string):
        """Run ``api_target`` plus the sanity check, guarded by the watchdog.

        Returns the shared manager dict populated by the child processes.
        """
        time_now = datetime.now()
        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        jobs = []
        p_one = multiprocessing.Process(
            target=self.check_utilities, name="name_check_utilities", args=(20, return_dict))
        jobs.append(p_one)
        p_two = multiprocessing.Process(target=api_target, name=api_name, args=(
            xml_formated_data_string, return_dict))
        jobs.append(p_two)
        p_stop = multiprocessing.Process(target=self.stop_process, name="name_stop_process", args=(
            p_two, time_now, dhl_delivery.max_response_time))
        # Start the jobs
        p_one.start()
        p_two.start()
        p_stop.start()
        # NOTE(review): p_stop is intentionally not joined (as in the
        # original code), so callers do not wait out the full timeout.
        for job in jobs:
            job.join()
        return return_dict

    @staticmethod
    def _post_xml(xml_formated_data_string, return_dict):
        """POST the XML payload to the DHL endpoint; store the raw reply.

        Runs inside a child process; the reply bytes are placed under
        'return_dhl_api_response' in the shared dict.
        """
        url = dhl_delivery.config.dhl_api_url
        url_request = urllib.request.Request(url, xml_formated_data_string)
        with urllib.request.urlopen(url_request) as response:
            # BUG FIX: response.read() already returns bytes; the original
            # called .read() on those bytes, raising AttributeError.
            url_response_data = response.read()
        return_dict['return_dhl_api_response'] = url_response_data

    def process_quote(self, xml_formated_data_string):
        """Request a quote; returns the shared result dict."""
        return self._run_with_watchdog(
            self.call_dhl_quote_api, "name_call_dhl_quote_api", xml_formated_data_string)

    @staticmethod
    def call_dhl_quote_api(xml_formated_data_string, return_dict):
        DhlProcess._post_xml(xml_formated_data_string, return_dict)

    def process_delivery(self, xml_formated_data_string):
        """Request a delivery; returns the shared result dict."""
        return self._run_with_watchdog(
            self.call_dhl_delivery_api, "name_call_dhl_delivery_api", xml_formated_data_string)

    @staticmethod
    def call_dhl_delivery_api(xml_formated_data_string, return_dict):
        DhlProcess._post_xml(xml_formated_data_string, return_dict)

    def process_pickup(self, xml_formated_data_string):
        """Request a pickup; returns the shared result dict."""
        return self._run_with_watchdog(
            self.call_dhl_pickup_api, "name_call_dhl_pickup_api", xml_formated_data_string)

    @staticmethod
    def call_dhl_pickup_api(xml_formated_data_string, return_dict):
        DhlProcess._post_xml(xml_formated_data_string, return_dict)
| 36.330986
| 116
| 0.663695
| 647
| 5,159
| 4.925811
| 0.11592
| 0.062755
| 0.070599
| 0.098839
| 0.834641
| 0.800753
| 0.800753
| 0.800753
| 0.800753
| 0.800753
| 0
| 0.00155
| 0.249855
| 5,159
| 141
| 117
| 36.588652
| 0.821964
| 0.073076
| 0
| 0.716981
| 0
| 0
| 0.057652
| 0.034382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084906
| false
| 0.018868
| 0.066038
| 0
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50d3a4d451ad52a377683d73bc2a6be4b3d89439
| 92
|
py
|
Python
|
src/gui_utils.py
|
kataphraktos/faa_eval
|
d0260a35abdc60ce27fdf58765f78177461255a3
|
[
"MIT"
] | null | null | null |
src/gui_utils.py
|
kataphraktos/faa_eval
|
d0260a35abdc60ce27fdf58765f78177461255a3
|
[
"MIT"
] | null | null | null |
src/gui_utils.py
|
kataphraktos/faa_eval
|
d0260a35abdc60ce27fdf58765f78177461255a3
|
[
"MIT"
] | null | null | null |
from tkinter import filedialog
def get_file_path():
    """Open the native file-picker dialog and return the chosen path.

    Thin wrapper around tkinter's askopenfilename; presumably returns an
    empty string when the user cancels — confirm against tkinter docs.
    """
    selected_path = filedialog.askopenfilename()
    return selected_path
| 23
| 39
| 0.804348
| 11
| 92
| 6.545455
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 92
| 4
| 39
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
0fa60f7ad608422ba6750ff3282494067c43d3fa
| 497
|
py
|
Python
|
spec/fixtures/specificLint/lint_unsupportedoperandtypesfor.py
|
Askaholic/linter-mypy
|
97978c5c9455d4215ea0cd0395e34b8eb118feca
|
[
"MIT"
] | 33
|
2016-12-08T14:53:50.000Z
|
2022-02-22T20:56:49.000Z
|
spec/fixtures/specificLint/lint_unsupportedoperandtypesfor.py
|
Askaholic/linter-mypy
|
97978c5c9455d4215ea0cd0395e34b8eb118feca
|
[
"MIT"
] | 27
|
2017-03-12T01:18:05.000Z
|
2021-01-27T14:59:54.000Z
|
spec/fixtures/specificLint/lint_unsupportedoperandtypesfor.py
|
Askaholic/linter-mypy
|
97978c5c9455d4215ea0cd0395e34b8eb118feca
|
[
"MIT"
] | 7
|
2017-03-12T01:56:07.000Z
|
2022-03-24T18:09:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
add(1, 2) + "!"  # fixture: int + str — intentional type error for the linter spec
result =add(1, 2) + "!"  # fixture: same error, no space before '='
result= add(1, 2) + "!"  # fixture: same error, no space after '='
result = add(1, 2) + "!"  # fixture: same error, conventional spacing
x= add(1, 2) + "!"  # fixture: same error bound to a short name
def add(x: int, y: int) -> int:  # the errors repeat inside a function body
    add(1, 2) + "!"
    result =add(1, 2) + "!"
    result= add(1, 2) + "!"
    result = add(1, 2) + "!"
    x= add(1, 2) + "!"
    def xadd(x: int, y: int) -> int:  # ...and inside a nested function
        add(1, 2) + "!"
        result =add(1, 2) + "!"
        result= add(1, 2) + "!"
        result = add(1, 2) + "!"
        x= add(1, 2) + "!"
        return x + y + "!"  # fixture: int + str in a return position
    return x + y + "!"  # NOTE(review): line positions are part of the lint spec — only trailing comments added
| 19.115385
| 33
| 0.420523
| 84
| 497
| 2.488095
| 0.202381
| 0.287081
| 0.358852
| 0.473684
| 0.76555
| 0.76555
| 0.76555
| 0.76555
| 0.76555
| 0.76555
| 0
| 0.087432
| 0.263581
| 497
| 25
| 34
| 19.88
| 0.483607
| 0.086519
| 0
| 0.894737
| 0
| 0
| 0.037611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0fb2106c6417b67000aed7028dce59e01b8f02d7
| 221
|
py
|
Python
|
settings.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | 1
|
2019-11-17T02:29:25.000Z
|
2019-11-17T02:29:25.000Z
|
settings.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | 1
|
2021-06-01T23:53:26.000Z
|
2021-06-01T23:53:26.000Z
|
settings.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | null | null | null |
# Absolute Windows paths to the browser WebDriver executables.
BROWSER_DRIVER = "D:\\Soft\\Installed\\webDrivers\\chromedriver.exe"
BROWSER_DRIVER_CHROME = "D:\\Soft\\Installed\\webDrivers\\chromedriver.exe"
# BUG FIX: the original mixed a raw-string prefix with doubled backslashes
# (r"D:\\Soft\\Installed\webDrivers\\geckodriver.exe"), which yields literal
# double backslashes in the runtime path, unlike the two constants above.
# Use the same escaped form so all three resolve to single-backslash paths.
BROWSER_DRIVER_FIREFOX = "D:\\Soft\\Installed\\webDrivers\\geckodriver.exe"
| 55.25
| 75
| 0.78733
| 27
| 221
| 6.259259
| 0.444444
| 0.230769
| 0.248521
| 0.426036
| 0.615385
| 0.615385
| 0.615385
| 0.615385
| 0
| 0
| 0
| 0
| 0.040724
| 221
| 3
| 76
| 73.666667
| 0.79717
| 0
| 0
| 0
| 0
| 0
| 0.656109
| 0.656109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0fc2562e5bc83c7cebaf0d1f087d594eb5745ed7
| 347
|
py
|
Python
|
python/debug_runner.py
|
paul-leonard/data-structures-and-algorithms
|
783d2450d2cafcb507fefd79dc02c4819aebb312
|
[
"MIT"
] | 1
|
2020-09-07T19:39:18.000Z
|
2020-09-07T19:39:18.000Z
|
python/debug_runner.py
|
paul-leonard/data-structures-and-algorithms
|
783d2450d2cafcb507fefd79dc02c4819aebb312
|
[
"MIT"
] | 1
|
2020-11-16T06:54:25.000Z
|
2020-11-16T06:54:25.000Z
|
python/debug_runner.py
|
paul-leonard/data-structures-and-algorithms
|
783d2450d2cafcb507fefd79dc02c4819aebb312
|
[
"MIT"
] | null | null | null |
# Import a deeply nested module at top level so the debugger can resolve
# breakpoints inside it (see the explanation printed below).
from code_challenges.fifo_animal_shelter.fifo_animal_shelter import AnimalShelter
print("You can use this file and run debugger againist it... and then have imports and calls of functions you want to test. Those python files and functions can then be a far lower level in the file structure... but debugger will actually be able to find them.")
| 86.75
| 263
| 0.806916
| 59
| 347
| 4.661017
| 0.762712
| 0.072727
| 0.123636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152738
| 347
| 3
| 264
| 115.666667
| 0.935374
| 0
| 0
| 0
| 0
| 0.5
| 0.731988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
0fc3aac476a34b2676d7a8a2427cbe601bedb0c3
| 192
|
py
|
Python
|
lumapps/api/__init__.py
|
cmccoy10/lumapps-sdk
|
3e4a15da526f921faa401b45e3945d1301b52dbe
|
[
"MIT"
] | 8
|
2018-12-17T15:46:22.000Z
|
2021-11-16T16:18:26.000Z
|
lumapps/api/__init__.py
|
cmccoy10/lumapps-sdk
|
3e4a15da526f921faa401b45e3945d1301b52dbe
|
[
"MIT"
] | 118
|
2019-01-03T10:35:59.000Z
|
2022-03-17T16:43:05.000Z
|
lumapps/api/__init__.py
|
cmccoy10/lumapps-sdk
|
3e4a15da526f921faa401b45e3945d1301b52dbe
|
[
"MIT"
] | 10
|
2018-11-23T13:57:22.000Z
|
2021-09-30T20:55:40.000Z
|
from lumapps.api.base_client import BaseClient, FileContent # noqa
from lumapps.api.client import LumAppsClient # noqa
from lumapps.api.conf import __pypi_packagename__, __version__ # noqa
| 48
| 70
| 0.822917
| 25
| 192
| 5.92
| 0.56
| 0.222973
| 0.283784
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119792
| 192
| 3
| 71
| 64
| 0.87574
| 0.072917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0fccdcdf3d849a57e9a0a5a5621c0fef81122249
| 15,613
|
py
|
Python
|
tests/test_cli.py
|
renemoll/bob
|
0d0d897f92816b90dbcdd8131d22e3457fb6588b
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
renemoll/bob
|
0d0d897f92816b90dbcdd8131d22e3457fb6588b
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
renemoll/bob
|
0d0d897f92816b90dbcdd8131d22e3457fb6588b
|
[
"MIT"
] | null | null | null |
"""Test the CLI entry point."""
import os
import pathlib
import subprocess
import tempfile
import docopt
import pytest_mock
import toml
from bob.cli import main
def test_cli_bootstrap(
mocker: pytest_mock.MockerFixture, tmp_path: pathlib.Path
) -> None:
"""Verify the CLI performs a plain bootstrap operation."""
# 1. Prepare
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": True,
"build": False,
"configure": False,
"debug": False,
"release": False,
}
work = tmp_path / "work"
work.mkdir()
os.chdir(str(work))
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cmake = work / "cmake"
assert cmake.is_dir()
cmake_file = cmake / "FindBob.cmake"
assert cmake_file.is_file()
ref = pathlib.Path(__file__).parent.resolve() / "ref" / "bootstrap.cmake"
assert cmake_file.read_text().strip() == ref.read_text().strip()
def test_cli_configure_default(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": False,
"configure": True,
"debug": False,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_called_once_with(
[
"cmake",
"-B",
"build/native-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
],
check=True,
)
def test_cli_configure_release(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": False,
"configure": True,
"debug": False,
"release": True,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_called_once_with(
[
"cmake",
"-B",
"build/native-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
],
check=True,
)
def test_cli_configure_debug(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": False,
"configure": True,
"debug": True,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_called_once_with(
[
"cmake",
"-B",
"build/native-debug",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Debug",
],
check=True,
)
def test_cli_configure_linux_default(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": False,
"configure": True,
"debug": False,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_called_once_with(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
"-G",
"Ninja",
],
check=True,
)
def test_cli_configure_linux_release(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": False,
"configure": True,
"debug": False,
"release": True,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_called_once_with(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
"-G",
"Ninja",
],
check=True,
)
def test_cli_configure_linux_debug(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": False,
"configure": True,
"debug": True,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_called_once_with(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-debug",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Debug",
"-G",
"Ninja",
],
check=True,
)
def test_cli_configure_invalid_target(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a configure."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "dummy",
"bootstrap": False,
"build": False,
"configure": True,
"debug": True,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 65
subprocess.run.assert_not_called()
def test_cli_build_default(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": True,
"configure": False,
"debug": False,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_any_call(
[
"cmake",
"-B",
"build/native-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
],
check=True,
)
subprocess.run.assert_any_call(
["cmake", "--build", "build/native-release"], check=True
)
def test_cli_build_release(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": True,
"configure": False,
"debug": False,
"release": True,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_any_call(
[
"cmake",
"-B",
"build/native-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
],
check=True,
)
subprocess.run.assert_any_call(
["cmake", "--build", "build/native-release"], check=True
)
def test_cli_build_debug(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": True,
"configure": False,
"debug": True,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
subprocess.run.assert_any_call(
[
"cmake",
"-B",
"build/native-debug",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Debug",
],
check=True,
)
subprocess.run.assert_any_call(
["cmake", "--build", "build/native-debug"], check=True
)
def test_cli_build_linux_default(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": True,
"configure": False,
"debug": False,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
"-G",
"Ninja",
],
check=True,
)
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"--build",
"build/linux-release",
],
check=True,
)
def test_cli_build_linux_release(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": True,
"configure": False,
"debug": False,
"release": True,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-release",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Release",
"-G",
"Ninja",
],
check=True,
)
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"--build",
"build/linux-release",
],
check=True,
)
def test_cli_build_linux_debug(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI performs the correct argument conversion for a build."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": "linux",
"bootstrap": False,
"build": True,
"configure": False,
"debug": True,
"release": False,
}
# 2. Execute
result = main()
# 3. Verify
assert result == 0
cwd = pathlib.Path.cwd()
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"-B",
"build/linux-debug",
"-S",
".",
"-DCMAKE_BUILD_TYPE=Debug",
"-G",
"Ninja",
],
check=True,
)
subprocess.run.assert_any_call(
[
"docker",
"run",
"--rm",
"-v",
f"{cwd}:/work/",
"renemoll/builder_clang",
"cmake",
"--build",
"build/linux-debug",
],
check=True,
)
def test_cli_build_error(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI captures subprocess exceptions."""
# 1. Prepare
mocker.patch("subprocess.run")
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": False,
"build": True,
"configure": False,
"debug": True,
"release": False,
}
subprocess.run.side_effect = subprocess.CalledProcessError(
returncode=1, cmd="exception_cmd"
)
# 2. Execute
result = main()
# 3. Verify
assert result == os.EX_SOFTWARE
subprocess.run.assert_called_once_with(
["cmake", "-B", "build/native-debug", "-S", ".", "-DCMAKE_BUILD_TYPE=Debug"],
check=True,
)
def test_cli_read_toml_file(mocker: pytest_mock.MockerFixture) -> None:
"""Verify the CLI reads and utilizes an options TOML file."""
# 1. Prepare
mocker.patch("docopt.docopt")
docopt.docopt.return_value = {
"--help": False,
"--version": False,
"<target>": None,
"bootstrap": True,
"build": False,
"configure": False,
"debug": False,
"release": False,
}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = pathlib.Path(tmp_dir)
work = tmp_path / "work"
work.mkdir()
os.chdir(str(work))
toml_file = work / "bob.toml"
toml_file.write_text(
toml.dumps({"external": {"destination_folder": "external"}})
)
# 2. Execute
main()
# 3. Verify
assert (work / "external").is_dir()
| 23.54902
| 85
| 0.504708
| 1,507
| 15,613
| 5.099536
| 0.078965
| 0.074951
| 0.074951
| 0.060377
| 0.893429
| 0.890306
| 0.890306
| 0.881588
| 0.862589
| 0.857645
| 0
| 0.00629
| 0.348299
| 15,613
| 662
| 86
| 23.584592
| 0.748993
| 0.103696
| 0
| 0.806262
| 0
| 0
| 0.20518
| 0.037948
| 0
| 0
| 0
| 0
| 0.076321
| 1
| 0.031311
| false
| 0
| 0.015656
| 0
| 0.046967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0feda9d1b8f0066c7bf31586968294c72678305b
| 163
|
py
|
Python
|
machine_translation/constants.py
|
kpburgula/nlp_code_snippets
|
edbb59443438b13450ab7103c810b1a421566bdd
|
[
"MIT"
] | 1
|
2021-12-06T04:10:08.000Z
|
2021-12-06T04:10:08.000Z
|
machine_translation/constants.py
|
kpburgula/nlp_code_snippets
|
edbb59443438b13450ab7103c810b1a421566bdd
|
[
"MIT"
] | null | null | null |
machine_translation/constants.py
|
kpburgula/nlp_code_snippets
|
edbb59443438b13450ab7103c810b1a421566bdd
|
[
"MIT"
] | null | null | null |
en_path = "/media/sf_Desktop/data/fr-en/europarl-v7.fr-en.en"
fr_path = "/media/sf_Desktop/data/fr-en/europarl-v7.fr-en.fr"
root = "/media/sf_Desktop/data/fr-en/"
| 40.75
| 61
| 0.730061
| 33
| 163
| 3.454545
| 0.30303
| 0.175439
| 0.368421
| 0.473684
| 0.894737
| 0.894737
| 0.701754
| 0.701754
| 0.701754
| 0.701754
| 0
| 0.012987
| 0.055215
| 163
| 3
| 62
| 54.333333
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0.779141
| 0.779141
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
0ff88a08b4ec9978b2608943683e700d4e8a65f1
| 37,165
|
py
|
Python
|
ctadmtool/dmspectrum/dmflux_table.py
|
sergiohcdna/ctadmtool
|
d509d51033855de5fb54d0c9be5fda84a2ed629e
|
[
"BSD-3-Clause"
] | null | null | null |
ctadmtool/dmspectrum/dmflux_table.py
|
sergiohcdna/ctadmtool
|
d509d51033855de5fb54d0c9be5fda84a2ed629e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T02:38:35.000Z
|
2022-03-24T02:38:35.000Z
|
ctadmtool/dmspectrum/dmflux_table.py
|
sergiohcdna/ctadmtool
|
d509d51033855de5fb54d0c9be5fda84a2ed629e
|
[
"BSD-3-Clause"
] | null | null | null |
import gammalib
import math
import numpy as np
from ctadmtool.dmspectrum.dmspectra import dmspectrum
from ctadmtool.tools.misc import ValidValue , ValidString
from tqdm import tqdm
import warnings
ALLOWED_FERMIONS = ('Majorana', 'Dirac')
ALLOWED_CHANNELS = ('eL', 'eR', 'e',
'MuL', 'MuR', 'Mu', 'TauL', 'TauR', 'Tau',
'q', 'c', 'b', 't',
'WL', 'WT', 'W', 'ZL', 'ZT', 'Z', 'g', 'Gamma', 'h',
'Nue', 'NuMu', 'NuTau',
'Ve', 'VMu', 'VTau')
ALLOWED_CHANNELSNOEW = ('e','Mu','Tau','q','c','b','t','W','Z','g')
@ValidValue("_dfactor", min_val=1.e-40)
@ValidValue('_lifetime', min_val=1.e-40)
@ValidValue("_jfactor", min_val=1.e-40)
@ValidValue('_sigmav', min_val=1.e-40)
@ValidString('_delta', empty_allowed=False, options=ALLOWED_FERMIONS)
@ValidValue('_mmin', min_val=10.0)
@ValidValue('_mmax', max_val=1.e+5)
@ValidString('_srcname', empty_allowed=False)
class dmtable() :
"""
Class to compute the flux generated
from annihilation of dark matter
particles.
dmflux is a derived class from dmspectrum.
"""
# Init
def __init__(self, srcname, mmin, mmax, mpoints, dminterp, delta='Majorana',
sigmav=3.6e-26, jfactor=1.e+19, lifetime=1.e+30, dfactor=1.e+19) :
"""
Initialize the dmflux_anna class
Parameters:
----------
srcname : Name of the target or family targets
mmin : Min Mass of dark matter candidate
mmax : Max Mass of dark matter candidate
mpoints : Number of mass points to create the Fits table
dminterp : dmspectrum class instance (I avoid to write a lot
of code I already have)
delta : Parameter to describe if dark matter candidate
is a Majorana (delta=2) fermion or a
Dirac (delta=4) fermion
sigmav : Annihilation cross-section (in cm**3/s)
jfactor : Astrophysical factor in (GeV**2/cm**5)
lifetime : Decay lifetime (in s)
dfactor : Astrophysical factor in (GeV/cm**2)
"""
# And, I check that mmin < mmax, if not, then reverse the order
if mmin > mmax :
msg = ('\nI found that Minimum mass {0} '.format(mmin) +
'is greater than Maximum mass {0}.\n'.format(mmax) +
'Changing the order...')
warnings.warn(msg, RuntimeWarning)
m_min = mmax
m_max = mmin
else :
m_min = mmin
m_max = mmax
# Initialize parameters of dmflux_ana class
self._srcname = srcname
self._sigmav = sigmav
self._jfactor = jfactor
self._lifetime = lifetime
self._dfactor = dfactor
self._delta = delta
self._mmin = m_min
self._mmax = m_max
self._mpoints = mpoints
if not isinstance(dminterp, dmspectrum) :
msg = 'dminterp must be an instance of dmspectrum class'
raise TypeError(msg)
else :
self._dminterp = dminterp
self._masses = self._marray(m_min, m_max, mpoints)
if dminterp.hasEW :
self._allowed_channels = ALLOWED_CHANNELS
else :
self._allowed_channels = ALLOWED_CHANNELSNOEW
self._model = None
# Return
return
@property
def sigmav(self) :
"""
Return value of the annihilation cross-section
used to compute the flux
"""
# Return
return self._sigmav
@sigmav.setter
def sigmav(self, sigmav) :
"""
Set the value of Annihilation cross-section (in cm**3/s)
used to compute the flux
Parameters
----------
sigmav : Annihilation cross-section (cm**3/s)
"""
# Check that sigmav is greater than 1.e-35
if sigmav < 1.e-40 :
raise ValueError(('\nValue of annihilation cross-section ' +
' must be greater than 1.e-40.\n' +
'This is just to avoid possible round errors'))
# Set sigmav
self._sigmav = sigmav
# Return
return
@property
def lifetime(self) :
"""
Return value of the decay lifetime
used to compute the flux
"""
# Return
return self._lifetime
@lifetime.setter
def lifetime(self, tau_chi) :
"""
Set the value of decay lifetime (in s)
used to compute the flux
Parameters
----------
tau_chi : Annihilation cross-section (cm**3/s)
"""
# Check that sigmav is greater than 1.e-35
if tau_chi < 1.e-40 :
raise ValueError(('\nValue of decay lifetime ' +
' must be greater than 1.e-40.\n' +
'This is just to avoid possible round errors'))
# Set sigmav
self._lifetime = tau_chi
# Return
return
@property
def jfactor(self) :
"""
Return the value of the Astrophysical factor
used to compute the flux
"""
# Return
return self._jfactor
@jfactor.setter
def jfactor(self, jfactor) :
"""
Set the value of the Astrophysical factor (GeV**2/cm**5)
to compute the dm flux
Parameters
----------
jfactor : Astrophysical factor J (GeV**2/cm**5)
"""
if jfactor < 1.e-40 :
raise ValueError('\nValue of jfactor must be greater than 1.e-40.')
# Set the jfactor
self._jfactor = jfactor
# Return
return
@property
def dfactor(self) :
"""
Return the value of the Astrophysical factor
used to compute the flux
"""
# Return
return self._dfactor
@dfactor.setter
def dfactor(self, dfactor) :
"""
Set the value of the Astrophysical factor (GeV/cm**2)
to compute the dm flux
Parameters
----------
dfactor : Astrophysical factor D (GeV/cm**2)
"""
# Set the jfactor
self._dfactor = dfactor
# Return
return
@property
def mmin(self) :
"""
Return Minimum value mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmin
@mmin.setter
def mmin(self, m_min) :
"""
Set the value of minimum mass (GeV) used to compute
the dm flux
"""
# Just check that the minimum mass is greater than
# 10.0 GeV.
if m_min < 10. :
raise ValueError(('\nMinimum mass {0} GeV '.format(m_min) +
'is below the allowed value (10GeV)'))
# Set minimum energy
self._mmin = m_min
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def mmax(self) :
"""
Return Maximum value of mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmax
@mmax.setter
def mmax(self, m_max) :
"""
Set the value of minimum mass (GeV) used to compute
the dm flux
"""
if m_max > 1.e+5 :
raise ValueError(('\nMaximum mass {0} GeV '.format(m_max) +
'is above the allowed value (1.e+5GeV)'))
# Set minimum energy
self._mmax = m_max
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def masses(self) :
"""
Return the values of the energy array used to compute the spectrum
"""
# Return
return self._masses
@masses.setter
def masses(self, m_vals) :
"""
Set the masses used to compute the spectrum
Parameters
----------
- evals : tuple with:
- mmin : Minimum mass (GeV)
- mmax : Maximum mass (GeV)
- mpoints : Number of points to create the array
"""
mmin, mmax, mpoints = m_vals
# Check if emin and emax are valid
if mmin < 10.0 :
raise ValueError(('Mass {0} '.format(mmin) +
'is lower than the allowed value 10.0'))
if mmax > 1.e+5 :
raise ValueError(('Mass {0} '.format(mmax) +
'is greater than the allowed value 1.e+5'))
# Create energy array
mvalues = self._marray(mmin, mmax, mpoints)
self._masses = mvalues
# Return
return
@staticmethod
def _marray(mmin, mmax, mpoints) :
"""
Create list of masses to generate the fits table.
The calculation is based in the number of points
The masses are computed assuming logarithmic distance
"""
logmmin = np.log10(mmin)
logmmax = np.log10(mmax)
width = (logmmax - logmmin)/(mpoints-1)
masses = []
for index in range(mpoints) :
masses.append(math.pow(10., logmmin+index*width))
# Return
return masses
@property
def delta(self) :
"""
Return what kind of dark matter particle is
used to compute the dm flux
"""
# Return
return self._delta
@delta.setter
def delta(self, delta) :
"""
Set the value of delta to describe what kind of
dark matter particle is used to compute the
dm flux.
Parameters
----------
delta : String, either Majorana or Dirac
"""
# Just to check that delta is valid
if delta not in ALLOWED_FERMIONS :
raise ValueError(('\nKind of Dark matter particle not ' +
'supported.\nOptions are:{0}'.format(ALLOWED_FERMIONS)))
# Set minimum energy
self._delta = delta
# Return
return
@property
def hasEW(self) :
"""
Return whether EW corrections are included or not
"""
# Return
return self._dminterp.hasEW
@hasEW.setter
def hasEW(self, has_EW) :
"""
Include EW corrections in computation of DM spectra
"""
self._dminterp.hasEW = has_EW
# Update the tuple of allowed channels
if has_EW :
self._allowed_channels = ALLOWED_CHANNELS
else :
self._allowed_channels = ALLOWED_CHANNELSNOEW
# Return
return
@property
def allowed_channels(self) :
"""
Return tuple of allowed channels according to
whether or not to include EW corrections in spectra
"""
# Return
return self._allowed_channels
@property
def tablemodel(self) :
"""
Return GModelSpectralTable
"""
# Return
return self._model
@property
def process(self) :
"""
Return dm process
"""
# Return
return self._dminterp.process
@process.setter
def process(self, process_vals) :
"""
Set annihilation (anna) or decay process in dminterp
Also update the properties jfactor and sigmav for anna
or dfactor and lifetime for decay
"""
# Extract values
dmprocess = process_vals[0]
astfactor = process_vals[1]
paroi = process_vals[2]
# Check that process is valid
VALID_PROCESSES = ['anna', 'decay']
if dmprocess not in VALID_PROCESSES :
msg = 'Valid options are: {0}'.format(VALID_PROCESSES)
raise ValueError(msg)
if astfactor < 1.e-40 or paroi < 1.e-40 :
raise ValueError('\nParameters must be greater than 1.e-40.')
# Update properties
if dmprocess == 'anna' :
self._jfactor = astfactor
self._sigmav = paroi
elif dmprocess == 'decay' :
self._dfactor = astfactor
self._lifetime = paroi
self._dminterp.process = dmprocess
# Update
# Return
return
@property
def elist(self) :
"""
Return list of energy values used to compute the spectrum
"""
# Return
return self._dminterp.energy
@elist.setter
def elist(self, evals) :
"""
Update energy values used to compute the spectrum
evals[0] --> emin
evals[1] --> emax
evals[2] --> epoints
"""
# Check that emin and emax are ok
# Note, that I set the minimum to 500 MeV
# There is no meaning to go to lower energies
# In the case of CTA
if evals[0] < 5.0e-3 or evals[1] > 1.e+5 :
raise ValueError('\nParameters outside of range')
# Update properties
self._dminterp.energy = evals
# Return
return
@staticmethod
def _norm_anna(sigmav, mass, delta, jfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
sigmav : Value of annihilation cross-section (cm**3/s)
mass : Mass of dark matter particles (GeV)
delta : String to indicate if dark matter is a
Majorana or Dirac fermion
jfactor : Astrophysica factor for annihilation
Return
------
norm : (1/[MeV* cm^2 * s])
"""
d = 0.
# Check delta
if delta == 'Majorana' :
d = 2.
elif delta == 'Dirac' :
d = 4.
# Compute ppfactor
ppfactor = sigmav / (d*4.*gammalib.pi*mass*mass)
norm = ppfactor * jfactor
return norm * 1.0e-3
@staticmethod
def _norm_decay(lifetime, mass, dfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
lifetime : Value of decay lifetime (s)
mass : Mass of dark matter particles (GeV)
dfactor : Astrophysical factor for ecay
Return
------
norm : (1/[MeV* cm^2 * s])
"""
# Compute ppfactor
ppfactor = 1 / (4.*gammalib.pi*mass*lifetime)
norm = ppfactor * dfactor
return norm * 1.0e-3
def create_modeltable(self) :
"""
Create fits table with spectrum and channels
"""
# Get list of channel indices
# First, I get the number of channels and energy points
# I don't want to access a private member from dmspectrum
# class, but I can get the number of points from the
# energy array
ch_indices = [i for i in range(len(self._allowed_channels))]
n_chs = len(ch_indices)
n_eng = len(self._dminterp.energy)
# Array with definitions of energy bins
gemin = gammalib.GEnergy(self._dminterp.emin, 'GeV')
gemax = gammalib.GEnergy(self._dminterp.emax, 'GeV')
ebins = gammalib.GEbounds(n_eng, gemin, gemax)
# Then create the GModelPar objects for mass and channel
# I know, default channel is hard coded, but we don't need
# to select any particular channel at this moment.
# Select Tau channel is just for initialization
dmmass = gammalib.GModelPar('Mass', self._mmin, 1.0)
dmmass.unit('GeV')
index = self._allowed_channels.index('Tau')
dmchannel = gammalib.GModelPar('Channel', index, 1.0)
# Create the GSpectralTablePar objects
par_mass = gammalib.GModelSpectralTablePar(dmmass, self._masses)
par_channel = gammalib.GModelSpectralTablePar(dmchannel, ch_indices)
# Create the container GSpectralTablePars and append the pars
pars = gammalib.GModelSpectralTablePars()
pars.append(par_mass)
pars.append(par_channel)
# GNdarray to save the spectra
spectra = gammalib.GNdarray(self._mpoints,n_chs,n_eng)
# filling the spectrum
desc = 'Computing {}-spectrrum'.format(self._dminterp.process)
for index, mass in tqdm(enumerate(self._masses),desc=desc,leave=False):
# Change the value of the mass
self._dminterp.mass = mass
for cindex, thisch in enumerate(self._allowed_channels):
# Modified the instance of dmspectrum
# to match values for every channel
# I don't need to change the array for energy
# And also, I don't need to check whether I want
# to include EW corrections or not
self._dminterp.channel = thisch
dmspec = self._dminterp.spectra()
for eindex in range(n_eng):
spectra[index, cindex, eindex] = dmspec[eindex]
# Get ppfactor and normalization
# This normalization computed here
# is not neccessary. You can change the normalization
# of the GModelSpectralTable later during simulation
# or analysis steps via GModelSpectralTable methods
norm = 0.0
minval = 0.0
maxval = 1.0e+20
if self._dminterp.process == 'anna' :
norm = self._norm_anna(self._sigmav, self._mmin,
self._delta, self._jfactor)
elif self._dminterp.process == 'decay' :
norm = self._norm_decay(self._lifetime, self._mmin, self._dfactor)
# Tuning the ModelSpectralTable
# I set the interpolation method of masses to logarithmic
# Mass and channel are fixed.
# Particularly, it's mandatory that channel parameter is fixed
model = gammalib.GModelSpectralTable(ebins, pars, spectra)
model.table_par('Mass').method(1)
model.table_par('Channel').method(0)
model['Mass'].fix()
model['Channel'].fix()
model['Normalization'].value(norm)
model['Normalization'].scale(1.0)
model['Normalization'].range(minval, maxval)
self._model = model
# Return
return
def save(self) :
"""
Save the DM table
"""
process = self._dminterp.process
ew = int(self._dminterp.hasEW)
name = 'DMModel{0}{1}EW{2}.fits'.format(process, self._srcname, ew)
self._model.save(name, True)
return
@ValidValue("_dfactor", min_val=1.e-40)
@ValidValue('_lifetime', min_val=1.e-40)
@ValidValue("_jfactor", min_val=1.e-40)
@ValidValue('_sigmav', min_val=1.e-40)
@ValidString('_delta', empty_allowed=False, options=ALLOWED_FERMIONS)
@ValidValue('_mmin', min_val=10.0)
@ValidValue('_mmax', max_val=1.e+5)
@ValidString('_srcname', empty_allowed=False)
class dmtable_ch() :
"""
Class to compute the flux generated
from annihilation of dark matter
particles.
dmflux is a derived class from dmspectrum.
the suffix 'ch' stands for single channel,
so the class only create table-models for
specific channels
"""
# Init
def __init__(self, srcname, mmin, mmax, mpoints, dminterp,
channel='Tau', delta='Majorana', sigmav=3.6e-26, jfactor=1.e+19,
lifetime=1.e+30, dfactor=1.e+19) :
"""
Initialize the dmflux_anna class
Parameters:
----------
srcname : Name of the target or family targets
mmin : Min Mass of dark matter candidate
mmax : Max Mass of dark matter candidate
mpoints : Number of mass points to create the Fits table
dminterp : dmspectrum class instance (I avoid to write a lot
of code I already have)
delta : Parameter to describe if dark matter candidate
is a Majorana (delta=2) fermion or a
Dirac (delta=4) fermion
sigmav : Annihilation cross-section (in cm**3/s)
jfactor : Astrophysical factor in (GeV**2/cm**5)
lifetime : Decay lifetime (in s)
dfactor : Astrophysical factor in (GeV/cm**2)
"""
# And, I check that mmin < mmax, if not, then reverse the order
if mmin > mmax :
msg = ('\nI found that Minimum mass {0} '.format(mmin) +
'is greater than Maximum mass {0}.\n'.format(mmax) +
'Changing the order...')
warnings.warn(msg, RuntimeWarning)
m_min = mmax
m_max = mmin
else :
m_min = mmin
m_max = mmax
# Initialize parameters of dmflux_ana class
self._srcname = srcname
self._sigmav = sigmav
self._jfactor = jfactor
self._lifetime = lifetime
self._dfactor = dfactor
self._delta = delta
self._mmin = m_min
self._mmax = m_max
self._mpoints = mpoints
self._channel = channel
if not isinstance(dminterp, dmspectrum) :
msg = 'dminterp must be an instance of dmspectrum class'
raise TypeError(msg)
else :
self._dminterp = dminterp
self._masses = self._marray(m_min, m_max, mpoints)
if dminterp.hasEW :
self._allowed_channels = ALLOWED_CHANNELS
else :
self._allowed_channels = ALLOWED_CHANNELSNOEW
# Check if channel is valid
if channel not in self._allowed_channels:
msg = ('\nChannel {0} not found in'.format(channel) +
'allowed channels. Options are: {0}'.format(ALLOWED_FERMIONS))
raise ValueError(msg)
# Update channel property of spectrum interpolator dminterp
# Only if the channels are different
if dminterp.channel != channel :
dminterp.channel = channel
self._model = None
# Return
return
@property
def sigmav(self) :
"""
Return value of the annihilation cross-section
used to compute the flux
"""
# Return
return self._sigmav
@sigmav.setter
def sigmav(self, sigmav) :
"""
Set the value of Annihilation cross-section (in cm**3/s)
used to compute the flux
Parameters
----------
sigmav : Annihilation cross-section (cm**3/s)
"""
# Check that sigmav is greater than 1.e-35
if sigmav < 1.e-40 :
raise ValueError(('\nValue of annihilation cross-section ' +
' must be greater than 1.e-40.\n' +
'This is just to avoid possible round errors'))
# Set sigmav
self._sigmav = sigmav
# Return
return
@property
def lifetime(self) :
"""
Return value of the decay lifetime
used to compute the flux
"""
# Return
return self._lifetime
@lifetime.setter
def lifetime(self, tau_chi) :
"""
Set the value of decay lifetime (in s)
used to compute the flux
Parameters
----------
tau_chi : Annihilation cross-section (cm**3/s)
"""
# Check that sigmav is greater than 1.e-35
if tau_chi < 1.e-40 :
raise ValueError(('\nValue of decay lifetime ' +
' must be greater than 1.e-40.\n' +
'This is just to avoid possible round errors'))
# Set sigmav
self._lifetime = tau_chi
# Return
return
@property
def jfactor(self) :
"""
Return the value of the Astrophysical factor
used to compute the flux
"""
# Return
return self._jfactor
@jfactor.setter
def jfactor(self, jfactor) :
"""
Set the value of the Astrophysical factor (GeV**2/cm**5)
to compute the dm flux
Parameters
----------
jfactor : Astrophysical factor J (GeV**2/cm**5)
"""
if jfactor < 1.e-40 :
raise ValueError('\nValue of jfactor must be greater than 1.e-40.')
# Set the jfactor
self._jfactor = jfactor
# Return
return
@property
def dfactor(self) :
"""
Return the value of the Astrophysical factor
used to compute the flux
"""
# Return
return self._dfactor
@dfactor.setter
def dfactor(self, dfactor) :
"""
Set the value of the Astrophysical factor (GeV/cm**2)
to compute the dm flux
Parameters
----------
dfactor : Astrophysical factor D (GeV/cm**2)
"""
# Set the jfactor
self._dfactor = dfactor
# Return
return
@property
def mmin(self) :
"""
Return Minimum value mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmin
@mmin.setter
def mmin(self, m_min) :
"""
Set the value of minimum mass (GeV) used to compute
the dm flux
"""
# Just check that the minimum mass is greater than
# 10.0 GeV.
if m_min < 10. :
raise ValueError(('\nMinimum mass {0} GeV '.format(m_min) +
'is below the allowed value (10GeV)'))
# Set minimum energy
self._mmin = m_min
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def mmax(self) :
"""
Return Maximum value of mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmax
@mmax.setter
def mmax(self, m_max) :
"""
Set the value of minimum mass (GeV) used to compute
the dm flux
"""
if m_max > 1.e+5 :
raise ValueError(('\nMaximum mass {0} GeV '.format(m_max) +
'is above the allowed value (1.e+5GeV)'))
# Set minimum energy
self._mmax = m_max
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def masses(self) :
"""
Return the values of the energy array used to compute the spectrum
"""
# Return
return self._masses
@masses.setter
def masses(self, m_vals) :
"""
Set the masses used to compute the spectrum
Parameters
----------
- evals : tuple with:
- mmin : Minimum mass (GeV)
- mmax : Maximum mass (GeV)
- mpoints : Number of points to create the array
"""
mmin, mmax, mpoints = m_vals
# Check if emin and emax are valid
if mmin < 10.0 :
raise ValueError(('Mass {0} '.format(mmin) +
'is lower than the allowed value 10.0'))
if mmax > 1.e+5 :
raise ValueError(('Mass {0} '.format(mmax) +
'is greater than the allowed value 1.e+5'))
# Create energy array
mvalues = self._marray(mmin, mmax, mpoints)
self._masses = mvalues
# Return
return
@staticmethod
def _marray(mmin, mmax, mpoints) :
"""
Create list of masses to generate the fits table.
The calculation is based in the number of points
The masses are computed assuming logarithmic distance
"""
logmmin = np.log10(mmin)
logmmax = np.log10(mmax)
width = (logmmax - logmmin)/(mpoints-1)
masses = []
for index in range(mpoints) :
masses.append(math.pow(10., logmmin+index*width))
# Return
return masses
@property
def delta(self) :
"""
Return what kind of dark matter particle is
used to compute the dm flux
"""
# Return
return self._delta
@delta.setter
def delta(self, delta) :
"""
Set the value of delta to describe what kind of
dark matter particle is used to compute the
dm flux.
Parameters
----------
delta : String, either Majorana or Dirac
"""
# Just to check that delta is valid
if delta not in ALLOWED_FERMIONS :
raise ValueError(('\nKind of Dark matter particle not ' +
'supported.\nOptions are:{0}'.format(ALLOWED_FERMIONS)))
# Set minimum energy
self._delta = delta
# Return
return
@property
def hasEW(self) :
"""
Return whether EW corrections are included or not
"""
# Return
return self._dminterp.hasEW
@hasEW.setter
def hasEW(self, has_EW) :
"""
Include EW corrections in computation of DM spectra
"""
self._dminterp.hasEW = has_EW
# Update the tuple of allowed channels
if has_EW :
self._allowed_channels = ALLOWED_CHANNELS
else :
self._allowed_channels = ALLOWED_CHANNELSNOEW
# Return
return
@property
def allowed_channels(self) :
"""
Return tuple of allowed channels according to
whether or not to include EW corrections in spectra
"""
# Return
return self._allowed_channels
@property
def channel(self) :
'''
Return channel used to compute the gamma-ray flux
'''
# Return
return self._channel
@channel.setter
def channel(self, ch) :
'''
Set channel used to compute the dmspectrum.
Also updates the channel parameter of the
spectrum interpolator dminterp
If channel is not valid, raise value error
'''
# Check if channel is valid
if ch not in self._allowed_channels :
msg = ('\nChannel {0} not found in'.format(channel) +
'allowed channels. Options are: {0}'.format(ALLOWED_FERMIONS))
raise ValueError(msg)
# Set channel
self._channel = ch
# Update dminterp instance
self._dminterp.channel = ch
# Return
return
@property
def tablemodel(self) :
"""
Return GModelSpectralTable
"""
# Return
return self._model
@property
def process(self) :
"""
Return dm process
"""
# Return
return self._dminterp.process
@process.setter
def process(self, process_vals) :
"""
Set annihilation (anna) or decay process in dminterp
Also update the properties jfactor and sigmav for anna
or dfactor and lifetime for decay
"""
# Extract values
dmprocess = process_vals[0]
astfactor = process_vals[1]
paroi = process_vals[2]
# Check that process is valid
VALID_PROCESSES = ['anna', 'decay']
if dmprocess not in VALID_PROCESSES :
msg = 'Valid options are: {0}'.format(VALID_PROCESSES)
raise ValueError(msg)
if astfactor < 1.e-40 or paroi < 1.e-40 :
raise ValueError('\nParameters must be greater than 1.e-40.')
# Update properties
if dmprocess == 'anna' :
self._jfactor = astfactor
self._sigmav = paroi
elif dmprocess == 'decay' :
self._dfactor = astfactor
self._lifetime = paroi
self._dminterp.process = dmprocess
# Update
# Return
return
@property
def elist(self) :
"""
Return list of energy values used to compute the spectrum
"""
# Return
return self._dminterp.energy
@elist.setter
def elist(self, evals) :
"""
Update energy values used to compute the spectrum
evals[0] --> emin
evals[1] --> emax
evals[2] --> epoints
"""
# Check that emin and emax are ok
# I set the minimum to 500 MeV
if evals[0] < 5.0e-3 or evals[1] > 1.e+5 :
raise ValueError('\nParameters outside of range')
# Update properties
self._dminterp.energy = evals
# Return
return
@staticmethod
def _norm_anna(sigmav, mass, delta, jfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
sigmav : Value of annihilation cross-section (cm**3/s)
mass : Mass of dark matter particles (GeV)
delta : String to indicate if dark matter is a
Majorana or Dirac fermion
jfactor : Astrophysica factor for annihilation
Return
------
norm : (1/[MeV* cm^2 * s])
"""
d = 0.
# Check delta
if delta == 'Majorana' :
d = 2.
elif delta == 'Dirac' :
d = 4.
# Compute ppfactor
ppfactor = sigmav / (d*4.*gammalib.pi*mass*mass)
norm = ppfactor * jfactor
return norm * 1.0e-3
@staticmethod
def _norm_decay(lifetime, mass, dfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
lifetime : Value of decay lifetime (s)
mass : Mass of dark matter particles (GeV)
dfactor : Astrophysical factor for ecay
Return
------
norm : (1/[MeV* cm^2 * s])
"""
# Compute ppfactor
ppfactor = 1 / (4.*gammalib.pi*mass*lifetime)
norm = ppfactor * dfactor
return norm * 1.0e-3
def create_modeltable(self) :
"""
Create fits table with spectrum for specific channel
"""
# Number of points in energy array
n_eng = len(self._dminterp.energy)
# Array with definitions of energy bins
# The min and max values are encapsulated in the
# dm spectrum interpolator dminterp
gemin = gammalib.GEnergy(self._dminterp.emin, 'GeV')
gemax = gammalib.GEnergy(self._dminterp.emax, 'GeV')
ebins = gammalib.GEbounds(n_eng, gemin, gemax)
# Then create the GModelPar objects for mass
dmmass = gammalib.GModelPar('Mass', self._mmin, 1.0)
dmmass.unit('GeV')
# Create the GSpectralTablePar objects
par_mass = gammalib.GModelSpectralTablePar(dmmass, self._masses)
# Create the container GSpectralTablePars and append the pars
pars = gammalib.GModelSpectralTablePars()
pars.append(par_mass)
# GNdarray to save the spectra
spectra = gammalib.GNdarray(self._mpoints,n_eng)
# filling the spectrum
desc = 'Computing {}-spectrrum'.format(self._dminterp.process)
for index, mass in tqdm(enumerate(self._masses),desc=desc,leave=False):
# Change the value of the mass
self._dminterp.mass = mass
dmspec = self._dminterp.spectra()
for eindex in range(n_eng):
spectra[index, eindex] = dmspec[eindex]
# Get ppfactor and normalization
# This normalization computed here
# is not neccessary. You can change the normalization
# of the GModelSpectralTable later during simulation
# or analysis steps via GModelSpectralTable methods
norm = 0.0
minval = 0.0
maxval = 1.0e+20
if self._dminterp.process == 'anna' :
norm = self._norm_anna(self._sigmav, self._mmin,
self._delta, self._jfactor)
elif self._dminterp.process == 'decay' :
norm = self._norm_decay(self._lifetime, self._mmin, self._dfactor)
# Tuning the ModelSpectralTable
# I set the interpolation method of masses to logarithmic
# Mass is a fixed parameter
model = gammalib.GModelSpectralTable(ebins, pars, spectra)
model.table_par('Mass').method(1)
model['Mass'].scale(1.)
model['Mass'].fix()
model['Normalization'].value(norm)
model['Normalization'].scale(1.0)
model['Normalization'].range(minval, maxval)
self._model = model
# Return
return
def save(self) :
"""
Save the DM table
"""
process = self._dminterp.process
ew = int(self._dminterp.hasEW)
src = self._srcname
ch = self._channel
name = 'DMModel{0}{1}EW{2}Ch{3}.fits'.format(process, src, ew, ch)
self._model.save(name, True)
return
| 29.33307
| 80
| 0.550518
| 4,203
| 37,165
| 4.782774
| 0.090174
| 0.03343
| 0.023878
| 0.027062
| 0.903393
| 0.899314
| 0.892747
| 0.890558
| 0.890558
| 0.886081
| 0
| 0.014106
| 0.360985
| 37,165
| 1,266
| 81
| 29.35624
| 0.83233
| 0.335907
| 0
| 0.901515
| 0
| 0
| 0.100358
| 0.002344
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117424
| false
| 0
| 0.013258
| 0
| 0.251894
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8794a4c61ca7c1ebe2d1b1ebb59c213552321c2
| 202
|
py
|
Python
|
fluidml/storage/__init__.py
|
fluidml/fluidml
|
4bee9b180d84ea9c7e5d98e1ffc48fcc5080e045
|
[
"Apache-2.0"
] | 15
|
2021-01-06T14:03:49.000Z
|
2022-03-04T19:30:27.000Z
|
fluidml/storage/__init__.py
|
fluidml/fluidml
|
4bee9b180d84ea9c7e5d98e1ffc48fcc5080e045
|
[
"Apache-2.0"
] | 16
|
2021-01-24T00:24:39.000Z
|
2021-08-04T20:56:03.000Z
|
fluidml/storage/__init__.py
|
fluidml/fluidml
|
4bee9b180d84ea9c7e5d98e1ffc48fcc5080e045
|
[
"Apache-2.0"
] | 1
|
2021-07-09T06:34:49.000Z
|
2021-07-09T06:34:49.000Z
|
from .base import ResultsStore
from .file_store import LocalFileStore, TypeInfo
from .in_memory_store import InMemoryStore
try:
from .mongo_db_store import MongoDBStore
except ImportError:
pass
| 25.25
| 48
| 0.821782
| 26
| 202
| 6.192308
| 0.692308
| 0.204969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143564
| 202
| 7
| 49
| 28.857143
| 0.930636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
e8c676555b79ea83578a0e61486a92abaa2f6e96
| 68,608
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_mylocality/cmp_astarlbmtontoh264ref/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_mylocality/cmp_astarlbmtontoh264ref/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_mylocality/cmp_astarlbmtontoh264ref/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0543263,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.245359,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.255278,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.586053,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.01483,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.582035,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.18292,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.540152,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.43388,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0482275,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0212449,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.175515,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.157119,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.223743,
'Execution Unit/Register Files/Runtime Dynamic': 0.178364,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.439,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.26296,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.27498,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00251342,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00251342,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00218375,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000842395,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00225703,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00946762,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0242926,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.151043,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.418377,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.513008,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.11619,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0343782,
'L2/Runtime Dynamic': 0.0108983,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.75935,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.671,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.178657,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.178657,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.60645,
'Load Store Unit/Runtime Dynamic': 3.73073,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.440539,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.881077,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.156349,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.156777,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0688451,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.82879,
'Memory Management Unit/Runtime Dynamic': 0.225623,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.4339,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.168255,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0319922,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.31027,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.510518,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 9.86894,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.047482,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.239983,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.288255,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.208254,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.335906,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.169554,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.713714,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.193989,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.69215,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0544575,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00873511,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.079648,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0646015,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.134105,
'Execution Unit/Register Files/Runtime Dynamic': 0.0733366,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.179669,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.481335,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.91375,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000923049,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000923049,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000824222,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000330144,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000928006,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00359832,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00812671,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0621031,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.95029,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.159691,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.21093,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.36052,
'Instruction Fetch Unit/Runtime Dynamic': 0.444449,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0433761,
'L2/Runtime Dynamic': 0.0227489,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.3162,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.05052,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0672632,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0672633,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.63383,
'Load Store Unit/Runtime Dynamic': 1.44951,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.16586,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.331719,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0588641,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0594792,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.245614,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0262868,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.502841,
'Memory Management Unit/Runtime Dynamic': 0.085766,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.8222,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.143253,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0111392,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.10494,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.259333,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.17555,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0775647,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.263611,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.472138,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.175616,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.283262,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.142981,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.60186,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.128467,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.87559,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0891969,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00736613,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0801393,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.054477,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.169336,
'Execution Unit/Register Files/Runtime Dynamic': 0.0618431,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.188227,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.456293,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.78898,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000602203,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000602203,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000538888,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000216471,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000782567,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00252586,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00526046,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0523702,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.33119,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.129583,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.177873,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.71138,
'Instruction Fetch Unit/Runtime Dynamic': 0.367612,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0663645,
'L2/Runtime Dynamic': 0.0376611,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.85292,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.865697,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0522748,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0522748,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.09977,
'Load Store Unit/Runtime Dynamic': 1.17577,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.128901,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.257802,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0457473,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0466995,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.207121,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0213751,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.441816,
'Memory Management Unit/Runtime Dynamic': 0.0680746,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.7844,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.234637,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0107788,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0846994,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.330115,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.76822,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00799019,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.208964,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0535811,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.143017,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.230682,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.11644,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.490139,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.155355,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.21525,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0101226,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00599879,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0459459,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0443647,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0560686,
'Execution Unit/Register Files/Runtime Dynamic': 0.0503635,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0987932,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.275738,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.43058,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0012486,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0012486,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00114119,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000471117,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000637303,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00427569,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0100545,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0426489,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.71284,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.108974,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.144855,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.06301,
'Instruction Fetch Unit/Runtime Dynamic': 0.310808,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0246729,
'L2/Runtime Dynamic': 0.00861813,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.54133,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.642719,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0421942,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0421943,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.74058,
'Load Store Unit/Runtime Dynamic': 0.893001,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.104044,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.208088,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0369255,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0372924,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.168674,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0178755,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.388214,
'Memory Management Unit/Runtime Dynamic': 0.0551679,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.0212,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0266282,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00677661,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0735516,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.106956,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.80513,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.296277511313148,
'Runtime Dynamic': 5.296277511313148,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.20534,
'Runtime Dynamic': 0.144375,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.267,
'Peak Power': 114.379,
'Runtime Dynamic': 20.7622,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.0617,
'Total Cores/Runtime Dynamic': 20.6178,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.20534,
'Total L3s/Runtime Dynamic': 0.144375,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.063457
| 124
| 0.682078
| 8,082
| 68,608
| 5.784212
| 0.067805
| 0.123556
| 0.112946
| 0.093437
| 0.939013
| 0.929837
| 0.917793
| 0.885257
| 0.861449
| 0.842239
| 0
| 0.131932
| 0.224332
| 68,608
| 914
| 125
| 75.063457
| 0.74651
| 0
| 0
| 0.642232
| 0
| 0
| 0.657421
| 0.048099
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa4e2e4b6eb4f4e2cbfad52d3ed026c70cb917b9
| 14,928
|
py
|
Python
|
tests/test_fireye.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_fireye.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_fireye.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
env = Environment()
# <164>fenotify-1590500.warning: CEF:0|FireEye|CMS|9.0.1.923211|MC|malware-callback|7|requestClientApplication=Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0 cn2Label=sid cn2=11111112 cs5Label=cncHost cs5=172.65.203.203 spt=10400 smac=00:1c:7f:3f:a4:4a cn1Label=vlan cn1=0 cs4Label=link cs4=https://uswmsidccm1.cs.ball.com/event_stream/events_for_bot?ev_id\\=1590500 rt=Jan 25 2021 20:37:54 UTC proto=tcp dst=172.65.203.203 externalId=1590500 dmac=7c:ad:4f:10:06:83 dvchost={{ host }} cs6Label=channel cs6=GET /appliance-test/alert.html HTTP/1.1::~~Host: fedeploycheck.fireeye.com::~~User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0::~~Accept: text/html,application/xhtml+xml,application/xml;q\\=0.9,image/webp,*/*;q\\=0.8::~~Accept-Language: en-US,en;q\\=0.5::~~Accept-Encoding: gzip, deflate::~~DNT: 1::~~Connection: keep-alive::~~Cookie: _gcl_au\\=1.1.750220273.1606759464; _lfa\\=LF1.1.6e3cb721e7505c55.1606759467306; apt.uid\\=AP-VMCORKOEGG4K-2-1610403364179-83855235.0.2.bf309e5a-bdbb-4e90-be0b-3c182673fb8a; _uetvid\\=f6904ed04ea311eb9f93275a98a20e01::~~Upgrade-Insecure-Requests: 1::~~::~~ src=162.18.29.1 cn3Label=cncPort cn3=80 dpt=80 request=hxxp://fedeploycheck.fireeye.com/appliance-test/alert.html dvc=10.246.129.27 requestMethod=GET act=notified cs1Label=sname cs1=FETestEvent devicePayloadId=71de5c6d-5faa-4d60-b145-4d060f734023 start=Jan 25 2021 20:37:54 UTC ","PRI":"<164>","MESSAGE":"fenotify-1590500.warning: CEF:0|FireEye|CMS|9.0.1.923211|MC|malware-callback|7|requestClientApplication=Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0 cn2Label=sid cn2=11111112 cs5Label=cncHost cs5=172.65.203.203 spt=10400 smac=00:1c:7f:3f:a4:4a cn1Label=vlan cn1=0 cs4Label=link cs4=https://uswmsidccm1.cs.ball.com/event_stream/events_for_bot?ev_id\\=1590500 rt=Jan 25 2021 20:37:54 UTC proto=tcp dst=172.65.203.203 externalId=1590500 dmac=7c:ad:4f:10:06:83 dvchost={{ host }} 
cs6Label=channel cs6=GET /appliance-test/alert.html HTTP/1.1::~~Host: fedeploycheck.fireeye.com::~~User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0::~~Accept: text/html,application/xhtml+xml,application/xml;q\\=0.9,image/webp,*/*;q\\=0.8::~~Accept-Language: en-US,en;q\\=0.5::~~Accept-Encoding: gzip, deflate::~~DNT: 1::~~Connection: keep-alive::~~Cookie: _gcl_au\\=1.1.750220273.1606759464; _lfa\\=LF1.1.6e3cb721e7505c55.1606759467306; apt.uid\\=AP-VMCORKOEGG4K-2-1610403364179-83855235.0.2.bf309e5a-bdbb-4e90-be0b-3c182673fb8a; _uetvid\\=f6904ed04ea311eb9f93275a98a20e01::~~Upgrade-Insecure-Requests: 1::~~::~~ src=162.18.29.1 cn3Label=cncPort cn3=80 dpt=80 request=hxxp://fedeploycheck.fireeye.com/appliance-test/alert.html dvc=10.246.129.27 requestMethod=GET act=notified cs1Label=sname cs1=FETestEvent devicePayloadId=71de5c6d-5faa-4d60-b145-4d060f734023 start=Jan 25 2021 20:37:54 UTC
def test_fireeye_cms(record_property, setup_wordlist, setup_splunk, setup_sc4s):
    """End-to-end ingestion check for a FireEye CMS malware-callback CEF event.

    Renders the sample event with a randomized hostname and a current
    timestamp, sends it to SC4S over syslog, then asserts that exactly one
    event with sourcetype "fe_cef_syslog" is found in the fireeye index.
    """
    # Random two-word hostname so this run's event is uniquely searchable.
    host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
    # dt = datetime.datetime.now(datetime.timezone.utc)
    dt = datetime.datetime.now()
    # time_operations comes from .timeutils (star import); of the rendered
    # time forms only bsd and epoch are used below.
    iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
    # Tune time functions
    # Drops the last 7 characters of the epoch string (presumably sub-second
    # digits) so it matches Splunk's _time -- TODO confirm against timeutils.
    epoch = epoch[:-7]
    mt = env.from_string(
        "{{ mark }}fenotify-1590500.warning: CEF:0|FireEye|CMS|9.0.1.923211|MC|malware-callback|7|requestClientApplication=Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0 cn2Label=sid cn2=11111112 cs5Label=cncHost cs5=172.65.203.203 spt=10400 smac=00:1c:7f:3f:a4:4a cn1Label=vlan cn1=0 cs4Label=link cs4=https://uswmsidccm1.cs.ball.com/event_stream/events_for_bot?ev_id\\=1590500 rt={{ bsd }} UTC proto=tcp dst=172.65.203.203 externalId=1590500 dmac=7c:ad:4f:10:06:83 dvchost={{ host }} cs6Label=channel cs6=GET /appliance-test/alert.html HTTP/1.1::~~Host: fedeploycheck.fireeye.com::~~User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0::~~Accept: text/html,application/xhtml+xml,application/xml;q\\=0.9,image/webp,*/*;q\\=0.8::~~Accept-Language: en-US,en;q\\=0.5::~~Accept-Encoding: gzip, deflate::~~DNT: 1::~~Connection: keep-alive::~~Cookie: _gcl_au\\=1.1.750220273.1606759464; _lfa\\=LF1.1.6e3cb721e7505c55.1606759467306; apt.uid\\=AP-VMCORKOEGG4K-2-1610403364179-83855235.0.2.bf309e5a-bdbb-4e90-be0b-3c182673fb8a; _uetvid\\=f6904ed04ea311eb9f93275a98a20e01::~~Upgrade-Insecure-Requests: 1::~~::~~ src=162.18.29.1 cn3Label=cncPort cn3=80 dpt=80 request=hxxp://fedeploycheck.fireeye.com/appliance-test/alert.html dvc=10.246.129.27 requestMethod=GET act=notified cs1Label=sname cs1=FETestEvent devicePayloadId=71de5c6d-5faa-4d60-b145-4d060f734023 start={{ bsd }} UTC\n"
    )
    message = mt.render(mark="<111>", bsd=bsd, host=host)
    # sendsingle comes from .sendmessage (star import); delivers the event to
    # SC4S using the address in setup_sc4s[0] and the port mapped under 514.
    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
    st = env.from_string(
        'search _time={{ epoch }} index=fireeye host="{{ host }}" sourcetype="fe_cef_syslog"'
    )
    search = st.render(epoch=epoch, host=host)
    # splunk_single comes from .splunkutils (star import).
    resultCount, eventCount = splunk_single(setup_splunk, search)
    # Attach diagnostics to the pytest report for debugging failures.
    record_property("host", host)
    record_property("resultCount", resultCount)
    record_property("message", message)
    # Exactly one hit proves the message was routed, parsed, and indexed.
    assert resultCount == 1
# cef[24366]: CEF:0|fireeye|hx|5.0.3|FireEye Acquisition Completed|FireEye Acquisition Completed|0|rt=Jan 26 2021 02:14:17 UTC dvchost={{ host }} deviceExternalId=0CC47AA8D848 categoryDeviceGroup=/IDS/Application/Service categoryDeviceType=Forensic Investigation categoryObject=/Host cs1Label=Host Agent Cert Hash cs1=aL9HjiEIvp8d1kiwieaaHG dst=10.49.2.59 dmac=64-00-6a-54-c4-7a dhost=MZAUNG dntdom=CS deviceCustomDate1Label=Agent Last Audit deviceCustomDate1=Jan 26 2021 02:13:19 UTC cs2Label=FireEye Agent Version cs2=32.30.0 cs5Label=Target GMT Offset cs5=+PT6H30M cs6Label=Target OS cs6=Windows 10 Enterprise 15063 externalId=1003 cs3Label=Script Name cs3=Bulk Acquisition suser=fe_services act=Acquisition Status in=1361 categoryOutcome=/Success categorySignificance=/Informational categoryBehavior=/Access/Start msg=Host MZAUNG Bulk Acquisition completed categoryTupleDescription=A Host Acquisition was successfully completed.
def test_fireeye_hx(record_property, setup_wordlist, setup_splunk, setup_sc4s):
    """End-to-end ingestion check for a FireEye HX acquisition CEF event.

    Renders the sample event with a randomized hostname and a current
    timestamp, sends it to SC4S over syslog, then asserts that exactly one
    event with sourcetype "hx_cef_syslog" is found in the fireeye index.
    """
    # Random two-word hostname so this run's event is uniquely searchable.
    host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
    # dt = datetime.datetime.now(datetime.timezone.utc)
    dt = datetime.datetime.now()
    # time_operations comes from .timeutils (star import); of the rendered
    # time forms only bsd and epoch are used below.
    iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
    # Tune time functions
    # Drops the last 7 characters of the epoch string (presumably sub-second
    # digits) so it matches Splunk's _time -- TODO confirm against timeutils.
    epoch = epoch[:-7]
    mt = env.from_string(
        "{{ mark }}cef[24366]: CEF:0|fireeye|hx|5.0.3|FireEye Acquisition Completed|FireEye Acquisition Completed|0|rt={{ bsd }} UTC dvchost={{ host }} deviceExternalId=0CC47AA8D848 categoryDeviceGroup=/IDS/Application/Service categoryDeviceType=Forensic Investigation categoryObject=/Host cs1Label=Host Agent Cert Hash cs1=aL9HjiEIvp8d1kiwieaaHG dst=10.49.2.59 dmac=64-00-6a-54-c4-7a dhost=MZAUNG dntdom=CS deviceCustomDate1Label=Agent Last Audit deviceCustomDate1=Jan 26 2021 02:13:19 UTC cs2Label=FireEye Agent Version cs2=32.30.0 cs5Label=Target GMT Offset cs5=+PT6H30M cs6Label=Target OS cs6=Windows 10 Enterprise 15063 externalId=1003 cs3Label=Script Name cs3=Bulk Acquisition suser=fe_services act=Acquisition Status in=1361 categoryOutcome=/Success categorySignificance=/Informational categoryBehavior=/Access/Start msg=Host MZAUNG Bulk Acquisition completed categoryTupleDescription=A Host Acquisition was successfully completed.\n"
    )
    message = mt.render(mark="<111>", bsd=bsd, host=host)
    # sendsingle comes from .sendmessage (star import); delivers the event to
    # SC4S using the address in setup_sc4s[0] and the port mapped under 514.
    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
    st = env.from_string(
        'search _time={{ epoch }} index=fireeye host="{{ host }}" sourcetype="hx_cef_syslog"'
    )
    search = st.render(epoch=epoch, host=host)
    # splunk_single comes from .splunkutils (star import).
    resultCount, eventCount = splunk_single(setup_splunk, search)
    # Attach diagnostics to the pytest report for debugging failures.
    record_property("host", host)
    record_property("resultCount", resultCount)
    record_property("message", message)
    # Exactly one hit proves the message was routed, parsed, and indexed.
    assert resultCount == 1
# 2021-03-03T20:14:22.226Z CEF:0|FireEye|ETP|3.0|etp|malicious email|10|rt=Mar 03 2021:20:07:54 UTC suser=redacted@redacted.com duser=redacted@redacted.com fname=hxxps://redacted[dot]com/foo fileHash=123456789abcdef destinationDnsDomain=redacted.com externalId=123456789 cs1Label=sname cs1=Phish.LIVE.DTI.URL cs3Label=Subject cs3=Subject Redacted cs4Label=Link cs4=https://etp.us.fireeye.com/alert/123456789/ cs5Label=Client cs5=REDACTED-COMPANY
def test_fireeye_etp(record_property, setup_wordlist, setup_splunk, setup_sc4s):
    """End-to-end ingestion check for a FireEye ETP malicious-email CEF event.

    Renders the sample event with a randomized hostname (carried in the cs5
    field) and a current timestamp, sends it to SC4S over syslog, then
    asserts that exactly one event with sourcetype "fe_etp" is found.
    """
    # Random two-word hostname so this run's event is uniquely searchable.
    host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
    # dt = datetime.datetime.now(datetime.timezone.utc)
    dt = datetime.datetime.now()
    # time_operations comes from .timeutils (star import); iso, bsd and
    # epoch are the forms used below.
    iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
    # Tune time functions
    # Drops the last 7 characters of the epoch string (presumably sub-second
    # digits) so it matches Splunk's _time -- TODO confirm against timeutils.
    epoch = epoch[:-7]
    mt = env.from_string(
        "{{ iso }} CEF:0|FireEye|ETP|3.0|etp|malicious email|10|rt={{ bsd }} UTC suser=redacted@redacted.com duser=redacted@redacted.com fname=hxxps://redacted[dot]com/foo fileHash=123456789abcdef destinationDnsDomain=redacted.com externalId=123456789 cs1Label=sname cs1=Phish.LIVE.DTI.URL cs3Label=Subject cs3=Subject Redacted cs4Label=Link cs4=https://etp.us.fireeye.com/alert/123456789/ cs5Label=Client cs5={{ host }} \n"
    )
    # NOTE(review): mark="<111>" is passed but the template above has no
    # {{ mark }} placeholder, so it is ignored -- presumably a leftover from
    # the sibling tests; harmless.
    message = mt.render(mark="<111>", iso=iso, bsd=bsd, host=host)
    # sendsingle comes from .sendmessage (star import); delivers the event to
    # SC4S using the address in setup_sc4s[0] and the port mapped under 514.
    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
    # Unlike the other tests, the host appears here as a bare search term,
    # not host=..., because it travels in the cs5 (Client) CEF field.
    st = env.from_string(
        'search _time={{ epoch }} index=fireeye "{{ host }}" sourcetype="fe_etp"'
    )
    search = st.render(epoch=epoch, host=host)
    # splunk_single comes from .splunkutils (star import).
    resultCount, eventCount = splunk_single(setup_splunk, search)
    # Attach diagnostics to the pytest report for debugging failures.
    record_property("host", host)
    record_property("resultCount", resultCount)
    record_property("message", message)
    # Exactly one hit proves the message was routed, parsed, and indexed.
    assert resultCount == 1
def test_fireeye_hx_json(record_property, setup_wordlist, setup_splunk, setup_sc4s):
    """End-to-end ingestion check for a FireEye HX JSON-payload alert.

    Renders the sample JSON alert with a randomized hostname and a current
    ISO timestamp, sends it to SC4S over syslog, then asserts that exactly
    one event with sourcetype "hx_json" is found in the fireeye index.
    """
    # Random two-word hostname so this run's event is uniquely searchable.
    host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
    # dt = datetime.datetime.now(datetime.timezone.utc)
    dt = datetime.datetime.now()
    # time_operations comes from .timeutils (star import); iso and epoch are
    # the forms used below.
    iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
    # Tune time functions
    # NOTE(review): trims 3 characters here vs. 7 in the CEF tests --
    # presumably a different _time precision for this sourcetype; confirm.
    epoch = epoch[:-3]
    mt = env.from_string(
        '<164>fenotify-7441437.warning: { "msg": "normal", "appliance-id": "xxxxx", "product": "HX", "appliance": "AAAAA-D-PR-FIREEYEHX01-ISD-MTE.xxxxx", "version": "5.0.0.0000", "alert": { "host": { "gmt_offset_seconds": 39600, "agent_version": "32.30.0", "hostname": "{{ host }}", "os": "Windows 10 Enterprise", "ip": "10.42.100.7", "agent_id": "xxxxxx", "containment_state": "normal", "domain": "CORPTESTAU" }, "matched_at": "2021-02-25T06:02:37+00:00", "condition": { "_id": "111111", "tests": [ { "operator": "contains", "token": "processEvent/processCmdLine", "value": "cmd", "type": "text" }, { "operator": "equal", "token": "processEvent/process", "value": "psexec.exe", "type": "text" }, { "operator": "matches", "token": "processEvent/processCmdLine", "value": "\\\\\\\\\\\\\\\\", "type": "text" } ], "enabled": true }, "resolution": "ALERT", "_id": 7536719, "reported_at": "2021-02-25T06:02:54.035+00:00", "sysinfo": { "_id": "xxxxxx", "mac_address": "xxxxxx" }, "indicator": { "display_name": "T1035-SERVICE-EXEC_PsExec", "_id": "xxxxxx", "uri_name": "xxxxxx", "description": "Adversaries may execute a binary, command, or script via a method that interacts with Windows services, such as the Service Control Manager. This can be done by either creating a new service or modifying an existing service. This technique is the execution used in conjunction with New Service and Modify Existing Service during service persistence or privilege escalation.\\n\\nPsExec allows redirects of the input and output of a remotely started executable through the use of SMB and the hidden $ADMIN share on the remote system. With this share, PsExec uses the Windows Service control Manager API to start the PsExecsvc service on the remote system which creates a named pipe that PsExec communicates with. This named pipe is what allows for input/output redirection back to the system that launched PsExec.", "category_id": 2, "signature": null }, "indicator_category": { "_id": 2, "uri_name": "Custom" }, "event_id": 111111, "event_at": "2021-02-25T06:02:28.113+00:00", "source": "IOC", "event_type": "processEvent", "matched_source_alerts": null, "event_values": { "processEvent/startTime": "2021-02-25T06:02:28.113Z", "processEvent/timestamp": "{{ iso }}", "processEvent/parentProcess": "cmd.exe", "processEvent/eventType": "start", "processEvent/parentPid": 7880, "processEvent/processPath": "C:\\\\build\\\\PSTools\\\\PsExec.exe", "processEvent/pid": 12320, "processEvent/process": "PsExec.exe", "processEvent/parentProcessPath": "C:\\\\Windows\\\\System32\\\\cmd.exe", "processEvent/md5": "111111", "processEvent/username": "CORPTESTAU\\\\xxxxxxx", "processEvent/processCmdLine": "psexec \\\\\\\\auae0501vt1038 cmd" }, "uuid": "b36909ea-948f-44db-8319-76526cb64b40", "name": "indicator-executed" } } \n'
    )
    # bsd is passed but this template only consumes host and iso.
    message = mt.render(mark="<111>", bsd=bsd, host=host, iso=iso)
    # sendsingle comes from .sendmessage (star import); delivers the event to
    # SC4S using the address in setup_sc4s[0] and the port mapped under 514.
    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
    st = env.from_string(
        'search _time={{ epoch }} index=fireeye host="{{ host }}" sourcetype="hx_json"'
    )
    search = st.render(epoch=epoch, host=host)
    # splunk_single comes from .splunkutils (star import).
    resultCount, eventCount = splunk_single(setup_splunk, search)
    # Attach diagnostics to the pytest report for debugging failures.
    record_property("host", host)
    record_property("resultCount", resultCount)
    record_property("message", message)
    # Exactly one hit proves the message was routed, parsed, and indexed.
    assert resultCount == 1
| 105.87234
| 3,761
| 0.693462
| 1,998
| 14,928
| 5.114114
| 0.247748
| 0.021922
| 0.01331
| 0.019573
| 0.768644
| 0.762576
| 0.760325
| 0.760325
| 0.760325
| 0.760325
| 0
| 0.120575
| 0.161643
| 14,928
| 140
| 3,762
| 106.628571
| 0.695885
| 0.318998
| 0
| 0.646341
| 0
| 0.04878
| 0.686767
| 0.214602
| 0
| 0
| 0
| 0
| 0.04878
| 1
| 0.04878
| false
| 0
| 0.060976
| 0
| 0.109756
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fa59008d5e00948fc70e6a8140c91c646ce814f5
| 2,317
|
py
|
Python
|
terrascript/opc/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/opc/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/opc/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/opc/r.py
import terrascript
# Resource classes for the Terraform "opc" (Oracle Public Cloud) provider.
# Each class is an empty marker subclass of terrascript.Resource: the class
# name is the Terraform resource type it represents, and all behavior lives
# in the base class. The repetitive, alphabet-soup layout suggests this
# module is auto-generated -- edit the generator, not this file, if a
# resource type needs to change.
class opc_compute_acl(terrascript.Resource):
    pass
class opc_compute_image_list(terrascript.Resource):
    pass
class opc_compute_image_list_entry(terrascript.Resource):
    pass
class opc_compute_instance(terrascript.Resource):
    pass
class opc_compute_ip_address_reservation(terrascript.Resource):
    pass
class opc_compute_ip_association(terrascript.Resource):
    pass
class opc_compute_ip_network(terrascript.Resource):
    pass
class opc_compute_ip_network_exchange(terrascript.Resource):
    pass
class opc_compute_ip_reservation(terrascript.Resource):
    pass
class opc_compute_machine_image(terrascript.Resource):
    pass
class opc_compute_route(terrascript.Resource):
    pass
class opc_compute_security_application(terrascript.Resource):
    pass
class opc_compute_security_association(terrascript.Resource):
    pass
class opc_compute_security_ip_list(terrascript.Resource):
    pass
class opc_compute_security_list(terrascript.Resource):
    pass
class opc_compute_security_rule(terrascript.Resource):
    pass
class opc_compute_sec_rule(terrascript.Resource):
    pass
class opc_compute_ssh_key(terrascript.Resource):
    pass
class opc_compute_storage_attachment(terrascript.Resource):
    pass
class opc_compute_storage_volume(terrascript.Resource):
    pass
class opc_compute_storage_volume_snapshot(terrascript.Resource):
    pass
class opc_compute_vnic_set(terrascript.Resource):
    pass
class opc_compute_security_protocol(terrascript.Resource):
    pass
class opc_compute_ip_address_prefix_set(terrascript.Resource):
    pass
class opc_compute_ip_address_association(terrascript.Resource):
    pass
class opc_compute_snapshot(terrascript.Resource):
    pass
class opc_compute_orchestrated_instance(terrascript.Resource):
    pass
class opc_compute_vpn_endpoint_v2(terrascript.Resource):
    pass
class opc_lbaas_certificate(terrascript.Resource):
    pass
class opc_lbaas_listener(terrascript.Resource):
    pass
class opc_lbaas_load_balancer(terrascript.Resource):
    pass
class opc_lbaas_policy(terrascript.Resource):
    pass
class opc_lbaas_server_pool(terrascript.Resource):
    pass
class opc_storage_container(terrascript.Resource):
    pass
class opc_storage_object(terrascript.Resource):
    pass
| 21.063636
| 64
| 0.813984
| 286
| 2,317
| 6.227273
| 0.174825
| 0.157215
| 0.451993
| 0.534531
| 0.864121
| 0.864121
| 0.677709
| 0.298147
| 0
| 0
| 0
| 0.000493
| 0.123867
| 2,317
| 109
| 65
| 21.256881
| 0.876847
| 0.008632
| 0
| 0.492958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.492958
| 0.014085
| 0
| 0.507042
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
d7115cd7d47a765c510ee45420fe90a7787e4b10
| 8,241
|
py
|
Python
|
tests/test_to_pubkey.py
|
pmazzocchi/btclib
|
5e57a741907eb9b7b827e700e59be824398e3e5d
|
[
"MIT"
] | 1
|
2019-01-30T16:43:32.000Z
|
2019-01-30T16:43:32.000Z
|
tests/test_to_pubkey.py
|
pmazzocchi/btclib
|
5e57a741907eb9b7b827e700e59be824398e3e5d
|
[
"MIT"
] | null | null | null |
tests/test_to_pubkey.py
|
pmazzocchi/btclib
|
5e57a741907eb9b7b827e700e59be824398e3e5d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
import unittest
from btclib import bip32
from btclib.alias import INF
from btclib.base58 import b58encode
from btclib.curves import secp256k1 as ec
from btclib.secpoint import bytes_from_point
from btclib.to_pubkey import to_pubkey_bytes, to_pubkey_tuple
class TestToPubKey(unittest.TestCase):
    def test_to_pub_tuple(self):
        """to_pubkey_tuple: accept BIP32 keys, SEC octets (bytes or
        hex-string, optionally whitespace-padded), and native tuples;
        reject private keys, padded/extended octets, and invalid points."""
        xpub = b'xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8'
        xpub_str = xpub.decode('ascii')
        xpub_dict = bip32.deserialize(xpub)
        # 'key' holds the compressed SEC bytes, 'Q' the point tuple.
        P_compr = xpub_dict['key']
        P_compr_hexstr = P_compr.hex()
        P = xpub_dict['Q']
        P_uncompr = bytes_from_point(P, False, ec)
        P_uncompr_hexstr = P_uncompr.hex()
        # BIP32
        self.assertEqual(to_pubkey_tuple(xpub, ec), P)
        self.assertEqual(to_pubkey_tuple(xpub_str, ec), P)
        # surrounding whitespace must be tolerated
        self.assertEqual(to_pubkey_tuple(' ' + xpub_str + ' ', ec), P)
        self.assertEqual(to_pubkey_tuple(xpub_dict, ec), P)
        # compressed SEC Octets (bytes or hex-string, with 02 or 03 prefix)
        self.assertEqual(to_pubkey_tuple(P_compr, ec), P)
        # extra leading/trailing bytes must be rejected
        self.assertRaises(ValueError, to_pubkey_tuple, b'\x00' + P_compr, ec)
        self.assertEqual(to_pubkey_tuple(P_compr_hexstr, ec), P)
        self.assertEqual(to_pubkey_tuple(' ' + P_compr_hexstr + ' ', ec), P)
        self.assertRaises(ValueError, to_pubkey_tuple, P_compr_hexstr + '00', ec)
        # uncompressed SEC Octets (bytes or hex-string, with 04 prefix)
        self.assertEqual(to_pubkey_tuple(P_uncompr, ec), P)
        self.assertRaises(ValueError, to_pubkey_tuple, b'\x00' + P_uncompr, ec)
        self.assertEqual(to_pubkey_tuple(P_uncompr_hexstr, ec), P)
        self.assertEqual(to_pubkey_tuple(' ' + P_uncompr_hexstr + ' ', ec), P)
        self.assertRaises(ValueError, to_pubkey_tuple, P_uncompr_hexstr + '00', ec)
        # native tuple
        self.assertEqual(to_pubkey_tuple(P, ec), P)
        # pubkey input
        # private keys must be rejected, in every form
        xprv = b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
        self.assertRaises(ValueError, to_pubkey_tuple, xprv, ec)
        xprv_dict = bip32.deserialize(xprv)
        self.assertRaises(ValueError, to_pubkey_tuple, xprv_dict, ec)
        # INF (the point at infinity) is not a valid public key point;
        # from here on P and its serializations are deliberately invalid
        P = INF
        self.assertRaises(ValueError, to_pubkey_tuple, P, ec)
        P_compr = b'\x02' + P[0].to_bytes(ec.psize, 'big')
        self.assertRaises(ValueError, to_pubkey_tuple, P_compr, ec)
        P_uncompr = b'\x04' + P[0].to_bytes(ec.psize, 'big') + P[1].to_bytes(ec.psize, 'big')
        self.assertRaises(ValueError, to_pubkey_tuple, P_uncompr, ec)
        P_compr_hexstr = P_compr.hex()
        self.assertRaises(ValueError, to_pubkey_tuple, P_compr_hexstr, ec)
        P_uncompr_hexstr = P_uncompr.hex()
        self.assertRaises(ValueError, to_pubkey_tuple, P_uncompr_hexstr, ec)
        # repackage the invalid point inside an otherwise well-formed
        # BIP32 serialization: it must still be rejected
        t = xpub_dict['version']
        t += xpub_dict['depth'].to_bytes(1, 'big')
        t += xpub_dict['parent_fingerprint']
        t += xpub_dict['index']
        t += xpub_dict['chain_code']
        t += P_compr
        xpub = b58encode(t)
        self.assertRaises(ValueError, to_pubkey_tuple, xpub, ec)
        xpub_str = xpub.decode('ascii')
        self.assertRaises(ValueError, to_pubkey_tuple, xpub_str, ec)
def test_to_pub_bytes(self):
    """Exercise to_pubkey_bytes over every accepted public-key representation.

    Inputs covered: BIP32 xpub (bytes, str with/without surrounding
    whitespace, deserialized dict), SEC compressed and uncompressed
    octets (bytes and hex-string), and the native point tuple.  Both
    the compressed (True) and uncompressed (False) output flags are
    tested, as well as invalid inputs that must raise ValueError.
    """
    # reference key material: a BIP32 xpub and its SEC / tuple equivalents
    xpub = b'xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8'
    xpub_str = xpub.decode('ascii')
    xpub_dict = bip32.deserialize(xpub)
    P_compr = xpub_dict['key']  # compressed SEC bytes (02/03 prefix)
    P_compr_hexstr = P_compr.hex()
    P = xpub_dict['Q']  # native point tuple
    P_uncompr = bytes_from_point(P, False, ec)  # uncompressed SEC bytes (04 prefix)
    P_uncompr_hexstr = P_uncompr.hex()
    # BIP32 input, compressed result
    self.assertEqual(to_pubkey_bytes(xpub, True, ec), P_compr)
    self.assertEqual(to_pubkey_bytes(xpub_str, True, ec), P_compr)
    self.assertEqual(to_pubkey_bytes(' ' + xpub_str + ' ', True, ec), P_compr)
    self.assertEqual(to_pubkey_bytes(xpub_dict, True, ec), P_compr)
    # compressed SEC Octets input, compressed result
    self.assertEqual(to_pubkey_bytes(P_compr, True, ec), P_compr)
    # a leading extra byte makes the payload 34 bytes: invalid
    self.assertRaises(ValueError, to_pubkey_bytes, b'\x00' + P_compr, True, ec)
    self.assertEqual(to_pubkey_bytes(P_compr_hexstr, True, ec), P_compr)
    self.assertEqual(to_pubkey_bytes(' ' + P_compr_hexstr + ' ', True, ec), P_compr)
    # a trailing extra byte is also invalid
    self.assertRaises(ValueError, to_pubkey_bytes, P_compr_hexstr + '00', True, ec)
    # uncompressed SEC Octets input, compressed result requested: mismatch
    self.assertRaises(ValueError, to_pubkey_bytes, P_uncompr, True, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, P_uncompr_hexstr, True, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, ' ' + P_uncompr_hexstr + ' ', True, ec)
    # native tuple input, compressed result
    self.assertEqual(to_pubkey_bytes(P, True, ec), P_compr)
    # BIP32 input, uncompressed result requested: BIP32 keys are always
    # compressed, so every form must be rejected
    self.assertRaises(ValueError, to_pubkey_bytes, xpub, False, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, xpub_str, False, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, ' ' + xpub_str + ' ', False, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, xpub_dict, False, ec)
    # compressed SEC Octets input, uncompressed result requested: mismatch
    self.assertRaises(ValueError, to_pubkey_bytes, P_compr, False, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, P_compr_hexstr, False, ec)
    self.assertRaises(ValueError, to_pubkey_bytes, ' ' + P_compr_hexstr + ' ', False, ec)
    # uncompressed SEC Octets input, uncompressed result
    self.assertEqual(to_pubkey_bytes(P_uncompr, False, ec), P_uncompr)
    self.assertRaises(ValueError, to_pubkey_bytes, b'\x00' + P_uncompr, False, ec)
    self.assertEqual(to_pubkey_bytes(P_uncompr_hexstr, False, ec), P_uncompr)
    self.assertEqual(to_pubkey_bytes(' ' + P_uncompr_hexstr + ' ', False, ec), P_uncompr)
    self.assertRaises(ValueError, to_pubkey_bytes, P_uncompr_hexstr + '00', False, ec)
    # native tuple input, uncompressed result
    self.assertEqual(to_pubkey_bytes(P, False, ec), P_uncompr)
    # private-key inputs must be rejected
    xprv = b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
    self.assertRaises(ValueError, to_pubkey_bytes, xprv, True, ec)
    xprv_dict = bip32.deserialize(xprv)
    self.assertRaises(ValueError, to_pubkey_bytes, xprv_dict, True, ec)
    # invalid input: INF is not a valid public key in any representation
    # NOTE(review): the original comment said "7 is not a field element" —
    # presumably stale copy-paste from a sibling test; confirm INF's shape
    P = INF
    self.assertRaises(ValueError, to_pubkey_bytes, P, True, ec)
    P_compr = b'\x02' + P[0].to_bytes(ec.psize, 'big')
    self.assertRaises(ValueError, to_pubkey_bytes, P_compr, True, ec)
    P_uncompr = b'\x04' + P[0].to_bytes(ec.psize, 'big') + P[1].to_bytes(ec.psize, 'big')
    self.assertRaises(ValueError, to_pubkey_bytes, P_uncompr, True, ec)
    P_compr_hexstr = P_compr.hex()
    self.assertRaises(ValueError, to_pubkey_bytes, P_compr_hexstr, True, ec)
    P_uncompr_hexstr = P_uncompr.hex()
    self.assertRaises(ValueError, to_pubkey_bytes, P_uncompr_hexstr, True, ec)
    # rebuild an xpub whose key field holds the invalid point above;
    # it must be rejected whether given as bytes or as str
    t = xpub_dict['version']
    t += xpub_dict['depth'].to_bytes(1, 'big')
    t += xpub_dict['parent_fingerprint']
    t += xpub_dict['index']
    t += xpub_dict['chain_code']
    t += P_compr
    xpub = b58encode(t)
    self.assertRaises(ValueError, to_pubkey_bytes, xpub, True, ec)
    xpub_str = xpub.decode('ascii')
    self.assertRaises(ValueError, to_pubkey_bytes, xpub_str, True, ec)
if __name__ == "__main__":
    # execute the test suite only if this module is run as a script
    unittest.main()
| 48.476471
| 129
| 0.690329
| 1,075
| 8,241
| 5.022326
| 0.11814
| 0.091869
| 0.086683
| 0.186701
| 0.851824
| 0.842749
| 0.819041
| 0.775699
| 0.753288
| 0.670865
| 0
| 0.024629
| 0.206771
| 8,241
| 169
| 130
| 48.763314
| 0.801285
| 0.119039
| 0
| 0.42735
| 0
| 0
| 0.090117
| 0.061368
| 0
| 0
| 0
| 0
| 0.504274
| 1
| 0.017094
| false
| 0
| 0.059829
| 0
| 0.08547
| 0.017094
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d725bf44f1254278448c6888f806ddea635065c4
| 7,870
|
py
|
Python
|
halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py
|
mclaughlin6464/halotools_old
|
96fbdf5fc156160f19ccd4ae3ee964f831d26fa6
|
[
"BSD-3-Clause"
] | null | null | null |
halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py
|
mclaughlin6464/halotools_old
|
96fbdf5fc156160f19ccd4ae3ee964f831d26fa6
|
[
"BSD-3-Clause"
] | null | null | null |
halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py
|
mclaughlin6464/halotools_old
|
96fbdf5fc156160f19ccd4ae3ee964f831d26fa6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from astropy.tests.helper import pytest
from astropy.utils.misc import NumpyRNGContext
from ..los_pvd_vs_rp import los_pvd_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_los_pvd_vs_rp_correctness1', 'test_los_pvd_vs_rp_correctness2',
'test_los_pvd_vs_rp_correctness3',
'test_los_pvd_vs_rp_auto_consistency', 'test_los_pvd_vs_rp_cross_consistency')
fixed_seed = 43
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness1():
    """ Correctness check for `~halotools.mock_observables.los_pvd_vs_rp`.

    Two tight loci of points are placed at (0.5, 0.5, 0.1) and
    (0.5, 0.35, 0.25).  The first locus moves with random uniform
    z-velocities; the second is at rest.  PBCs are effectively turned
    off (period left at infinity).  Because the pair counts for this
    configuration are known analytically, the expected line-of-sight
    dispersion is simply np.std over the repeated velocity array.
    """
    num_pts = 100
    locus_a = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.1, seed=fixed_seed)
    locus_b = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.35, zc=0.25, seed=fixed_seed)

    vels_a = np.zeros((num_pts, 3))
    vels_b = np.zeros((num_pts, 3))
    with NumpyRNGContext(fixed_seed):
        vels_a[:, 2] = np.random.uniform(0, 1, num_pts)

    rp_bins = np.array([0.001, 0.1, 0.3])
    pi_max = 0.2
    s1s2 = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b, do_auto=False)

    expected_pvd = np.std(np.repeat(vels_a[:, 2], num_pts))
    assert np.allclose(s1s2[0], 0, rtol=0.1)
    assert np.allclose(s1s2[1], expected_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness2():
    """ Correctness check for `~halotools.mock_observables.los_pvd_vs_rp`
    with periodic boundary conditions.

    Two tight loci of points sit at (0.5, 0.5, 0.1) and (0.5, 0.35, 0.95);
    the first moves with random uniform z-velocities, the second is at
    rest.  With period=1 the loci are close in z across the boundary, and
    the expected cross-PVD is np.std over the repeated velocity array.
    """
    num_pts = 100
    locus_a = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.1, seed=fixed_seed)
    locus_b = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.35, zc=0.95, seed=fixed_seed)

    vels_a = np.zeros((num_pts, 3))
    vels_b = np.zeros((num_pts, 3))
    with NumpyRNGContext(fixed_seed):
        vels_a[:, 2] = np.random.uniform(0, 1, num_pts)

    rp_bins = np.array([0.001, 0.1, 0.3])
    pi_max = 0.2
    s1s2 = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b, do_auto=False, period=1)

    expected_pvd = np.std(np.repeat(vels_a[:, 2], num_pts))
    assert np.allclose(s1s2[0], 0, rtol=0.1)
    assert np.allclose(s1s2[1], expected_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness3():
    """ Same configuration as test_los_pvd_vs_rp_correctness2, except that
    the two loci are concatenated into a single sample and the auto-PVD of
    the combined sample is compared against the analytic expectation.
    """
    num_pts = 100
    locus_a = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.1, seed=fixed_seed)
    locus_b = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.35, zc=0.95, seed=fixed_seed)

    vels_a = np.zeros((num_pts, 3))
    vels_b = np.zeros((num_pts, 3))
    with NumpyRNGContext(fixed_seed):
        vels_a[:, 2] = np.random.uniform(0, 1, num_pts)

    # bundle both loci into one sample
    combined_sample = np.concatenate((locus_a, locus_b))
    combined_vels = np.concatenate((vels_a, vels_b))

    rp_bins = np.array([0.001, 0.1, 0.3])
    pi_max = 0.2
    s1s1 = los_pvd_vs_rp(combined_sample, combined_vels, rp_bins, pi_max, period=1)

    expected_pvd = np.std(np.repeat(vels_a[:, 2], num_pts))
    assert np.allclose(s1s1[1], expected_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_auto_consistency():
    """ The auto-correlation terms must agree whether or not the
    cross-correlation is also requested.
    """
    num_pts = 100
    locus_a = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.1, seed=fixed_seed)
    locus_b = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.95, seed=fixed_seed)

    vels_a = np.zeros((num_pts, 3))
    vels_b = np.zeros((num_pts, 3))
    with NumpyRNGContext(fixed_seed):
        vels_a[:, 2] = np.random.uniform(0, 1, num_pts)

    rp_bins = np.array([0.001, 0.1, 0.3])
    pi_max = 0.2
    s1s1a, s1s2a, s2s2a = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b)
    s1s1b, s2s2b = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b,
        do_cross=False)

    assert np.allclose(s1s1a, s1s1b, rtol=0.001)
    assert np.allclose(s2s2a, s2s2b, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_cross_consistency():
    """ The cross-correlation term must agree whether or not the
    auto-correlations are also requested.
    """
    num_pts = 100
    locus_a = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.1, seed=fixed_seed)
    locus_b = generate_locus_of_3d_points(num_pts, xc=0.5, yc=0.5, zc=0.95, seed=fixed_seed)

    vels_a = np.zeros((num_pts, 3))
    vels_b = np.zeros((num_pts, 3))
    with NumpyRNGContext(fixed_seed):
        vels_a[:, 2] = np.random.uniform(0, 1, num_pts)

    rp_bins = np.array([0.001, 0.1, 0.3])
    pi_max = 0.2
    s1s1a, s1s2a, s2s2a = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b)
    s1s2b = los_pvd_vs_rp(locus_a, vels_a, rp_bins, pi_max,
        sample2=locus_b, velocities2=vels_b,
        do_auto=False)

    assert np.allclose(s1s2a, s1s2b, rtol=0.001)
| 37.47619
| 88
| 0.705591
| 1,299
| 7,870
| 4.113934
| 0.13164
| 0.009731
| 0.014596
| 0.043039
| 0.900075
| 0.895771
| 0.868825
| 0.8561
| 0.8561
| 0.850299
| 0
| 0.069975
| 0.18831
| 7,870
| 209
| 89
| 37.655502
| 0.766594
| 0.327954
| 0
| 0.704082
| 0
| 0
| 0.032373
| 0.032373
| 0
| 0
| 0
| 0
| 0.081633
| 1
| 0.05102
| false
| 0
| 0.061224
| 0
| 0.112245
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d73fc794a36998baf243df7cb08340cd282cc7b8
| 94
|
py
|
Python
|
podcast_feed_parser/__init__.py
|
The-Daishogun/podcast_feed_parser
|
95902a1234314085f012306bfea795d30872a70b
|
[
"MIT"
] | null | null | null |
podcast_feed_parser/__init__.py
|
The-Daishogun/podcast_feed_parser
|
95902a1234314085f012306bfea795d30872a70b
|
[
"MIT"
] | null | null | null |
podcast_feed_parser/__init__.py
|
The-Daishogun/podcast_feed_parser
|
95902a1234314085f012306bfea795d30872a70b
|
[
"MIT"
] | null | null | null |
from podcast_feed_parser.podcast_feed_parser import PodcastFeedParser
from utils import hasher
| 47
| 69
| 0.914894
| 13
| 94
| 6.307692
| 0.615385
| 0.268293
| 0.414634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 94
| 2
| 70
| 47
| 0.942529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d758485f0490861b49b3cbe2512b1e8f95198f33
| 3,833
|
py
|
Python
|
simulation/tests/test_model.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | 2
|
2020-11-04T17:34:38.000Z
|
2021-08-13T07:55:23.000Z
|
simulation/tests/test_model.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | null | null | null |
simulation/tests/test_model.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | 2
|
2021-05-28T02:55:44.000Z
|
2021-08-03T13:56:10.000Z
|
import os
import executables.run_parse_convert
from model.blocks.order import Order
from model.blocks.wip import wip
from model.sim_model import Model
from simpy.core import Environment
directory_path = os.path.dirname(os.path.realpath(__file__))
def test_parse_convert():
    """Smoke-test BPMN parsing followed by model conversion."""
    runner = executables.run_parse_convert
    runner.parse_bpmn()
    runner.convert()
def test_model_run_until():
    """Run the model to t=41, check the intermediate counters, then run to
    completion and check the final state.
    """
    environment = Environment()
    sim_model = Model(environment)

    # intermediate state at t=41
    sim_model.env.run(until=41)
    assert sim_model.env.now == 41
    assert sim_model.customer.overall_count_in == 5
    assert sim_model.delivery.overall_count_in == 3
    assert len(sim_model.wait_for_material.entities) == 1
    assert len(sim_model.production.entities) == 1

    # run to completion
    sim_model.env.run()
    assert environment.now == 70
    assert sim_model.customer.overall_count_in == sim_model.delivery.overall_count_in == 6
def test_model_run_until_wip():
    """Run to t=31, snapshot work-in-progress state to a file, reload it,
    and verify the restored model reproduces the same trajectory as an
    uninterrupted run (compare test_model_run_until).
    """
    env = Environment()
    model = Model(env)
    env.run(until=31)
    # wip: capture and restore state
    # fix: join with the directory instead of string concatenation, which
    # produced "<tests-dir>_temp_state.json" next to the tests directory
    file_path = os.path.join(directory_path, "_temp_state.json")
    wip.capture_state_to_file(model, file_path)
    model = wip.load_state_from_file(file_path, Environment, Model, {"Order": Order})
    assert model.env.now == 31
    model.env.run(until=41)
    assert model.env.now == 41
    assert model.customer.overall_count_in == 5
    assert model.delivery.overall_count_in == 3
    assert len(model.wait_for_material.entities) == 1
    assert len(model.production.entities) == 1
    model.env.run()
    assert model.env.now == 70
    assert model.customer.overall_count_in == model.delivery.overall_count_in == 6
def test_model_active():
    """With every block flagged active, the run ends at t=75 with six
    orders both created and delivered.
    """
    environment = Environment()
    sim_model = Model(environment)
    for component in sim_model.model_components.values():
        component.active = True
    sim_model.env.run()
    assert environment.now == 75
    assert sim_model.customer.overall_count_in == sim_model.delivery.overall_count_in == 6
def test_model_active_wip():
    """Like test_model_active, but snapshot/restore WIP state at t=31 and
    re-activate all blocks on the restored model before finishing the run.
    """
    env = Environment()
    model = Model(env)
    for block in model.model_components.values():
        block.active = True
    model.env.run(until=31)
    # wip: capture and restore state
    # fix: join with the directory instead of string concatenation, which
    # produced "<tests-dir>_temp_state.json" next to the tests directory
    file_path = os.path.join(directory_path, "_temp_state.json")
    wip.capture_state_to_file(model, file_path)
    model = wip.load_state_from_file(file_path, Environment, Model, {"Order": Order})
    for block in model.model_components.values():
        block.active = True
    assert model.env.now == 31
    model.env.run()
    assert model.env.now == 75
    assert model.customer.overall_count_in == model.delivery.overall_count_in == 6
def test_model_active_wip_inactive_except_source():
    """After the WIP restore at t=31, deactivate every block except the
    source (customer); the run must still complete with six deliveries.
    """
    env = Environment()
    model = Model(env)
    for block in model.model_components.values():
        block.active = True
    model.env.run(until=31)
    # wip: capture and restore state
    # fix: join with the directory instead of string concatenation, which
    # produced "<tests-dir>_temp_state.json" next to the tests directory
    file_path = os.path.join(directory_path, "_temp_state.json")
    wip.capture_state_to_file(model, file_path)
    model = wip.load_state_from_file(file_path, Environment, Model, {"Order": Order})
    for block in model.model_components.values():
        block.active = False
    model.customer.active = True  # except source
    assert model.env.now == 31
    model.env.run()
    assert model.env.now == 75
    assert model.customer.overall_count_in == model.delivery.overall_count_in == 6
def test_model_active_wip_fully_inactive():
    """After the WIP restore at t=31, deactivate every block including the
    source: no new orders are created, so the run drains the pipeline and
    ends at t=55 with only four deliveries.
    """
    env = Environment()
    model = Model(env)
    for block in model.model_components.values():
        block.active = True
    model.env.run(until=31)
    # wip: capture and restore state
    # fix: join with the directory instead of string concatenation, which
    # produced "<tests-dir>_temp_state.json" next to the tests directory
    file_path = os.path.join(directory_path, "_temp_state.json")
    wip.capture_state_to_file(model, file_path)
    model = wip.load_state_from_file(file_path, Environment, Model, {"Order": Order})
    for block in model.model_components.values():
        block.active = False  # source stops creating elements
    assert model.env.now == 31
    model.env.run()
    assert model.env.now == 55
    assert model.customer.overall_count_in == model.delivery.overall_count_in == 4
| 30.664
| 85
| 0.706235
| 544
| 3,833
| 4.738971
| 0.123162
| 0.083786
| 0.086889
| 0.065943
| 0.850659
| 0.829325
| 0.815361
| 0.813421
| 0.806051
| 0.806051
| 0
| 0.016041
| 0.186799
| 3,833
| 124
| 86
| 30.91129
| 0.811036
| 0.015654
| 0
| 0.752688
| 0
| 0
| 0.022305
| 0
| 0
| 0
| 0
| 0
| 0.27957
| 1
| 0.075269
| false
| 0
| 0.064516
| 0
| 0.139785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d76b3510cad652f76a637d270d50951e9503f875
| 25,462
|
py
|
Python
|
cicd/upload_to_garmin.py
|
samueldumont/WorkoutDatafield
|
7f6f290f8695935fcae0c28a486256067cb93a13
|
[
"MIT"
] | null | null | null |
cicd/upload_to_garmin.py
|
samueldumont/WorkoutDatafield
|
7f6f290f8695935fcae0c28a486256067cb93a13
|
[
"MIT"
] | null | null | null |
cicd/upload_to_garmin.py
|
samueldumont/WorkoutDatafield
|
7f6f290f8695935fcae0c28a486256067cb93a13
|
[
"MIT"
] | null | null | null |
import os
import requests
import xml.etree.ElementTree as ET
from pathlib import Path
from bs4 import BeautifulSoup
from requests_toolbelt import MultipartEncoder
import cloudscraper
import random, string
import time
GARMIN_USERNAME = os.getenv("GARMIN_USERNAME")
GARMIN_PASSWORD = os.getenv("GARMIN_PASSWORD")
APP_ID = os.getenv("APP_ID")
STORE_ID = os.getenv("STORE_ID")
DEV_ID = os.getenv("DEV_ID")
TAG_NAME = os.getenv("TAG_NAME")
BETA_APP = os.getenv("BETA_APP")
DEV_EMAIL = os.getenv("DEV_EMAIL")
# Fetch the GitHub release notes for this tag; fall back to an empty
# string if the request fails or the response lacks a "body" field.
try:
    release_notes = requests.get(
        f"https://api.github.com/repos/samueldumont/WorkoutDatafield/releases/tags/{TAG_NAME}"
    ).json()["body"]
except Exception:  # fix: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    release_notes = ""
print(f"Uploading {STORE_ID} with tag {TAG_NAME}. Beta : {BETA_APP}.")
scraper = cloudscraper.create_scraper() # returns a CloudScraper instance
### GET INITIAL COOKIES
headers = {
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"sec-ch-ua-mobile": "?0",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en",
}
querystring = {
"service": "https://apps.garmin.com/en-US",
"webhost": "apps.garmin.com",
"source": "https://apps.garmin.com/login",
"redirectAfterAccountLoginUrl": "https://apps.garmin.com/en-US",
"redirectAfterAccountCreationUrl": "https://apps.garmin.com/en-US",
"gauthHost": "https://sso.garmin.com/sso",
"locale": "en_US",
"id": "gauth-widget",
"cssUrl": "//static.garmin.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css",
"privacyStatementUrl": "//www.garmin.com/en-US/privacy/connect/",
"clientId": "APPS_LIBRARY",
"rememberMeShown": "true",
"rememberMeChecked": "false",
"createAccountShown": "true",
"openCreateAccount": "false",
"displayNameShown": "false",
"consumeServiceTicket": "true",
"initialFocus": "true",
"embedWidget": "false",
"generateExtraServiceTicket": "true",
"generateTwoExtraServiceTickets": "false",
"generateNoServiceTicket": "false",
"globalOptInShown": "false",
"globalOptInChecked": "false",
"mobile": "false",
"connectLegalTerms": "false",
"showTermsOfUse": "false",
"showPrivacyPolicy": "false",
"showConnectLegalAge": "false",
"locationPromptShown": "true",
"showPassword": "true",
"useCustomHeader": "false",
"mfaRequired": "false",
"performMFACheck": "false",
"rememberMyBrowserShown": "false",
"rememberMyBrowserChecked": "false",
}
# Prime the session: hit the store front page to collect initial cookies.
url = "https://apps.garmin.com/en-US/"  # fix: was an f-string with no placeholders
scraper.get(url, headers=headers)
#### LOGIN
url = "https://sso.garmin.com/sso/signin"
payload = ""
headers = {
"Accept-Language": "en",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Referer": "https://apps.garmin.com/",
"Sec-Fetch-Dest": "iframe",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
}
response = scraper.get(url, headers=headers, params=querystring)
soup = BeautifulSoup(response.content, "html.parser")
token = soup.find_all("input", {"name": "_csrf"})[0].get("value")
query = soup.find_all("input", {"id": "queryString"})[0].get("value")
payload = {
"username": GARMIN_USERNAME,
"password": GARMIN_PASSWORD,
"embed": "false",
"_csrf": token,
"rememberme": "on",
}
headers = {
"Accept-Language": "en",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Cache-Control": "no-cache",
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://sso.garmin.com",
"DNT": "1",
"Referer": f"{url}?{query}",
"Sec-Fetch-Dest": "iframe",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
}
response = scraper.post(url, data=payload, headers=headers, params=querystring)
print(f"Login result: {response.status_code}")
### UPLOAD FILE
url = f"https://apps.garmin.com/en-US/developer/{DEV_ID}/apps/{STORE_ID}/update"
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
}
scraper.get(url, headers=headers)
m = MultipartEncoder(
fields={
"appVersion": TAG_NAME,
"betaApp": BETA_APP,
"submit": "",
"file": (
f"WorkoutDatafield-{TAG_NAME}.iq",
open(f"/tmp/WorkoutDatafield-{TAG_NAME}.iq", "rb"),
"application/octet-stream",
),
},
boundary="----WebKitFormBoundary"
+ "".join(random.sample(string.ascii_letters + string.digits, 16)),
)
headers = {
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Cache-Control": "no-cache",
"Content-Type": m.content_type,
"Origin": "https://apps.garmin.com",
"Referer": url,
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
}
response = scraper.post(url, headers=headers, data=m, allow_redirects=True)
print(f"Upload result : {response.status_code}")
# UPDATE DETAILS, STILL TODO
url = f"https://apps.garmin.com/en-US/developer/{DEV_ID}/apps/{STORE_ID}/edit"
response = scraper.get(url)
soup = BeautifulSoup(response.text, "html.parser")
appDescription = soup.find("textarea", {"id": "app-desc-en"}).renderContents()
m = MultipartEncoder(
fields=[
("localizedAppModel[0].appLocale", "en"),
(
"localizedAppModel[0].appTitle",
"WorkoutDatafield - beta" if BETA_APP == "true" else "WorkoutDatafield",
),
("localizedAppModel[0].appDescription", appDescription),
("localizedAppModel[0].appWhatsNew", release_notes),
(
"localizedAppModel[0].heroImageObject",
(
"",
"",
"application/octet-stream",
),
),
("localizedAppModel[0].deleteHeroImage", "false"),
("localizedAppModel[0].heroImageUrl", ""),
("category", "251"),
("policy", "no"),
("policyUrl", ""),
("antPlusProfiles", "no"),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("_antPlusProfilesModel.selectedAntPlusProfiles", (None, "on")),
("antPlusProfilesModel.enteredAntPlusProfileComments[9999999]", ""),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("_countriesModel.selectedCountries", "on"),
("regionalLimits", "no"),
(
"iconFile",
(
"",
"",
"application/octet-stream",
),
),
(
"screenshotFiles[0]",
(
"",
"",
"application/octet-stream",
),
),
("screenshotIds[0]", ""),
("deleted[0]", "false"),
(
"screenshotFiles[1]",
(
"",
"",
"application/octet-stream",
),
),
("screenshotIds[1]", ""),
("deleted[1]", "false"),
(
"screenshotFiles[2]",
(
"",
"",
"application/octet-stream",
),
),
("screenshotIds[2]", ""),
("deleted[2]", "false"),
(
"screenshotFiles[3]",
(
"",
"",
"application/octet-stream",
),
),
("screenshotIds[3]", ""),
("deleted[3]", "false"),
(
"screenshotFiles[4]",
(
"",
"",
"application/octet-stream",
),
),
("screenshotIds[4]", ""),
("deleted[4]", "false"),
("videoUrl", ""),
("devEmail", DEV_EMAIL),
("sourceUrl", "https://github.com/samueldumont/WorkoutDatafield"),
("reviewNotificationActive", "true"),
("migrationAllowed", "true"),
("paymentModelCheck", "no"),
("iosAppUrl", ""),
("androidAppUrl", ""),
("hardwareProductUrl", ""),
("betaApp", BETA_APP),
("submit", ""),
],
boundary="----WebKitFormBoundary"
+ "".join(random.sample(string.ascii_letters + string.digits, 16)),
)
headers = {
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Cache-Control": "no-cache",
"Content-Type": m.content_type,
"Origin": "https://apps.garmin.com",
"Referer": url,
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
}
response = scraper.post(url, headers=headers, data=m, allow_redirects=True)
print(f"What's new update result : {response.status_code}")
| 43.674099
| 152
| 0.626345
| 1,955
| 25,462
| 7.97954
| 0.144246
| 0.498782
| 0.530962
| 0.753205
| 0.780641
| 0.773333
| 0.769103
| 0.769103
| 0.764679
| 0.764679
| 0
| 0.011542
| 0.19696
| 25,462
| 583
| 153
| 43.674099
| 0.751406
| 0.00377
| 0
| 0.685558
| 0
| 0.025594
| 0.608543
| 0.450856
| 0
| 0
| 0
| 0.001715
| 0
| 1
| 0
| false
| 0.005484
| 0.016453
| 0
| 0.016453
| 0.007313
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ad160084b502a6ace74d3d32d9f5f8b584c9ff12
| 183,081
|
py
|
Python
|
tdm/python/antchain_sdk_tdm/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | 6
|
2020-06-28T06:40:50.000Z
|
2022-02-25T11:02:18.000Z
|
tdm/python/antchain_sdk_tdm/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | null | null | null |
tdm/python/antchain_sdk_tdm/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | 6
|
2020-06-30T09:29:03.000Z
|
2022-01-07T10:42:22.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import List
class Config(TeaModel):
    """
    Model for initing client
    """

    def __init__(
        self,
        access_key_id: str = None,
        access_key_secret: str = None,
        security_token: str = None,
        protocol: str = None,
        read_timeout: int = None,
        connect_timeout: int = None,
        http_proxy: str = None,
        https_proxy: str = None,
        endpoint: str = None,
        no_proxy: str = None,
        max_idle_conns: int = None,
        user_agent: str = None,
        socks_5proxy: str = None,
        socks_5net_work: str = None,
        max_idle_time_millis: int = None,
        keep_alive_duration_millis: int = None,
        max_requests: int = None,
        max_requests_per_host: int = None,
    ):
        # accesskey id
        self.access_key_id = access_key_id
        # accesskey secret
        self.access_key_secret = access_key_secret
        # security token
        self.security_token = security_token
        # http protocol
        self.protocol = protocol
        # read timeout
        self.read_timeout = read_timeout
        # connect timeout
        self.connect_timeout = connect_timeout
        # http proxy
        self.http_proxy = http_proxy
        # https proxy
        self.https_proxy = https_proxy
        # endpoint
        self.endpoint = endpoint
        # proxy white list
        self.no_proxy = no_proxy
        # max idle conns
        self.max_idle_conns = max_idle_conns
        # user agent
        self.user_agent = user_agent
        # socks5 proxy
        self.socks_5proxy = socks_5proxy
        # socks5 network
        self.socks_5net_work = socks_5net_work
        # max idle time of a keep-alive connection (milliseconds)
        self.max_idle_time_millis = max_idle_time_millis
        # max lifetime of a keep-alive connection (milliseconds)
        self.keep_alive_duration_millis = keep_alive_duration_millis
        # max total number of keep-alive connections
        self.max_requests = max_requests
        # max number of keep-alive connections per target host
        self.max_requests_per_host = max_requests_per_host

    def validate(self):
        # Every setting is optional; nothing to check.
        pass

    def to_map(self):
        """Serialize set fields into a dict keyed by the camelCase wire names."""
        pairs = (
            ('access_key_id', 'accessKeyId'),
            ('access_key_secret', 'accessKeySecret'),
            ('security_token', 'securityToken'),
            ('protocol', 'protocol'),
            ('read_timeout', 'readTimeout'),
            ('connect_timeout', 'connectTimeout'),
            ('http_proxy', 'httpProxy'),
            ('https_proxy', 'httpsProxy'),
            ('endpoint', 'endpoint'),
            ('no_proxy', 'noProxy'),
            ('max_idle_conns', 'maxIdleConns'),
            ('user_agent', 'userAgent'),
            ('socks_5proxy', 'socks5Proxy'),
            ('socks_5net_work', 'socks5NetWork'),
            ('max_idle_time_millis', 'maxIdleTimeMillis'),
            ('keep_alive_duration_millis', 'keepAliveDurationMillis'),
            ('max_requests', 'maxRequests'),
            ('max_requests_per_host', 'maxRequestsPerHost'),
        )
        result = {}
        for attr, key in pairs:
            value = getattr(self, attr)
            # Unset (None) fields are omitted from the wire form.
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict of camelCase wire names; returns self."""
        m = m or dict()
        pairs = (
            ('access_key_id', 'accessKeyId'),
            ('access_key_secret', 'accessKeySecret'),
            ('security_token', 'securityToken'),
            ('protocol', 'protocol'),
            ('read_timeout', 'readTimeout'),
            ('connect_timeout', 'connectTimeout'),
            ('http_proxy', 'httpProxy'),
            ('https_proxy', 'httpsProxy'),
            ('endpoint', 'endpoint'),
            ('no_proxy', 'noProxy'),
            ('max_idle_conns', 'maxIdleConns'),
            ('user_agent', 'userAgent'),
            ('socks_5proxy', 'socks5Proxy'),
            ('socks_5net_work', 'socks5NetWork'),
            ('max_idle_time_millis', 'maxIdleTimeMillis'),
            ('keep_alive_duration_millis', 'keepAliveDurationMillis'),
            ('max_requests', 'maxRequests'),
            ('max_requests_per_host', 'maxRequestsPerHost'),
        )
        for attr, key in pairs:
            # Only overwrite attributes for keys actually present and non-None.
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class TdmCpfEncodeNameVO(TeaModel):
    """Code/name pair identifying a housing provident fund (CPF) center."""

    def __init__(
        self,
        code: str = None,
        name: str = None,
    ):
        # CPF center code
        self.code = code
        # CPF center name
        self.name = name

    def validate(self):
        # Both fields are mandatory.
        for attr in ('code', 'name'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        # Serialize only the fields that are set; wire keys match attr names.
        return {
            attr: getattr(self, attr)
            for attr in ('code', 'name')
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('code', 'name'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class TdmCpfCitysVO(TeaModel):
    """A city together with the CPF centers located in it."""

    def __init__(
        self,
        code: str = None,
        name: str = None,
        cpfs: List[TdmCpfEncodeNameVO] = None,
    ):
        # city code
        self.code = code
        # city name
        self.name = name
        # list of CPF centers in this city
        self.cpfs = cpfs

    def validate(self):
        for attr in ('code', 'name', 'cpfs'):
            self.validate_required(getattr(self, attr), attr)
        # Cascade validation into each non-empty list entry.
        for entry in self.cpfs or []:
            if entry:
                entry.validate()

    def to_map(self):
        result = {}
        if self.code is not None:
            result['code'] = self.code
        if self.name is not None:
            result['name'] = self.name
        # The 'cpfs' key is always emitted, even when the list is empty/unset.
        result['cpfs'] = [
            entry.to_map() if entry else None for entry in (self.cpfs or [])
        ]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('code') is not None:
            self.code = m.get('code')
        if m.get('name') is not None:
            self.name = m.get('name')
        # Rebuild the list from scratch on every call.
        self.cpfs = []
        if m.get('cpfs') is not None:
            self.cpfs = [
                TdmCpfEncodeNameVO().from_map(entry) for entry in m.get('cpfs')
            ]
        return self
class ChainInfo(TeaModel):
    """Location of a transaction on the blockchain."""

    def __init__(
        self,
        block_height: str = None,
        translate_date: str = None,
        tx_hash: str = None,
    ):
        # block height
        self.block_height = block_height
        # transaction time
        self.translate_date = translate_date
        # transaction hash (64 chars)
        self.tx_hash = tx_hash

    def validate(self):
        # All fields are optional.
        pass

    def to_map(self):
        # Serialize only the fields that are set; wire keys match attr names.
        return {
            attr: getattr(self, attr)
            for attr in ('block_height', 'translate_date', 'tx_hash')
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('block_height', 'translate_date', 'tx_hash'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class AuthAgreement(TeaModel):
    """Authorization agreement: its type and its time/count limits."""

    # Wire keys are identical to the attribute names.
    def __init__(
        self,
        auth_agreement_code: str = None,
        auth_agreement_type: str = None,
        auth_begin_time: str = None,
        auth_end_time: str = None,
        auth_count: int = None,
        auth_balance_count: int = None,
    ):
        # authorization agreement code
        self.auth_agreement_code = auth_agreement_code
        # authorization agreement type:
        #   TIME       - time-based authorization
        #   COUNT      - count-based authorization
        #   TIME_COUNT - count-based within a time range
        self.auth_agreement_type = auth_agreement_type
        # authorization start time (original comment was garbled; presumably
        # the begin timestamp - TODO confirm)
        self.auth_begin_time = auth_begin_time
        # authorization end date
        self.auth_end_time = auth_end_time
        # total number of authorized uses
        self.auth_count = auth_count
        # remaining number of authorized uses
        self.auth_balance_count = auth_balance_count

    def validate(self):
        # Only code and type are mandatory; the limits depend on the type.
        for attr in ('auth_agreement_code', 'auth_agreement_type'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        return {
            attr: getattr(self, attr)
            for attr in (
                'auth_agreement_code',
                'auth_agreement_type',
                'auth_begin_time',
                'auth_end_time',
                'auth_count',
                'auth_balance_count',
            )
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in (
            'auth_agreement_code',
            'auth_agreement_type',
            'auth_begin_time',
            'auth_end_time',
            'auth_count',
            'auth_balance_count',
        ):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CertUseParams(TeaModel):
    """Parameters for using a certificate: just the certificate document ID."""

    def __init__(
        self,
        issue_id: str = None,
    ):
        # certificate document ID
        self.issue_id = issue_id

    def validate(self):
        # The ID is mandatory.
        self.validate_required(self.issue_id, 'issue_id')

    def to_map(self):
        # Emit the single field only when it is set.
        return {} if self.issue_id is None else {'issue_id': self.issue_id}

    def from_map(self, m: dict = None):
        m = m or dict()
        value = m.get('issue_id')
        if value is not None:
            self.issue_id = value
        return self
class AuthUsedRecord(TeaModel):
    """A single record of authorized data usage, anchored on-chain."""

    def __init__(
        self,
        authorized_name: str = None,
        auth_code: str = None,
        chain_info: ChainInfo = None,
        extend_params: str = None,
        target_name: str = None,
        tee_data: str = None,
        use_date: str = None,
    ):
        # name of the authorized tenant:
        # ID-card number / unified social credit code
        self.authorized_name = authorized_name
        # authorization code
        self.auth_code = auth_code
        # blockchain transaction info
        self.chain_info = chain_info
        # extension field
        self.extend_params = extend_params
        # subject matter: product-code name
        self.target_name = target_name
        # trusted (TEE) authorization content
        self.tee_data = tee_data
        # time at which the data was used
        self.use_date = use_date

    def validate(self):
        # tee_data is the only optional field; checks run in declaration order.
        self.validate_required(self.authorized_name, 'authorized_name')
        self.validate_required(self.auth_code, 'auth_code')
        self.validate_required(self.chain_info, 'chain_info')
        if self.chain_info:
            self.chain_info.validate()
        self.validate_required(self.extend_params, 'extend_params')
        self.validate_required(self.target_name, 'target_name')
        self.validate_required(self.use_date, 'use_date')

    def to_map(self):
        result = {}
        for attr in ('authorized_name', 'auth_code'):
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        # Nested model is serialized recursively.
        if self.chain_info is not None:
            result['chain_info'] = self.chain_info.to_map()
        for attr in ('extend_params', 'target_name', 'tee_data', 'use_date'):
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('authorized_name', 'auth_code'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        # Nested model is deserialized recursively.
        if m.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(m['chain_info'])
        for attr in ('extend_params', 'target_name', 'tee_data', 'use_date'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CpfGrdkcxQueryExtendParams(TeaModel):
    """Extended query parameters for a personal CPF loan lookup."""

    def __init__(
        self,
        zjbzxbm: str = None,
        xingming: str = None,
        zjlx: str = None,
        zjhm: str = None,
        dkhtbh: str = None,
    ):
        # MOHURD (housing ministry) center code
        self.zjbzxbm = zjbzxbm
        # full name
        self.xingming = xingming
        # ID document type
        self.zjlx = zjlx
        # ID document number
        self.zjhm = zjhm
        # loan contract number
        self.dkhtbh = dkhtbh

    def validate(self):
        # Every field is mandatory.
        for attr in ('zjbzxbm', 'xingming', 'zjlx', 'zjhm', 'dkhtbh'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        # Serialize set fields; wire keys match attribute names.
        return {
            attr: getattr(self, attr)
            for attr in ('zjbzxbm', 'xingming', 'zjlx', 'zjhm', 'dkhtbh')
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('zjbzxbm', 'xingming', 'zjlx', 'zjhm', 'dkhtbh'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CertSummary(TeaModel):
    """Summary view of an issued certificate."""

    def __init__(
        self,
        issue_id: str = None,
        issue_cert_type: str = None,
        issue_cert_type_desc: str = None,
        issue_time: str = None,
        issue_icon: str = None,
    ):
        # certificate document ID
        self.issue_id = issue_id
        # certificate type
        self.issue_cert_type = issue_cert_type
        # certificate description
        self.issue_cert_type_desc = issue_cert_type_desc
        # time the certificate was issued
        self.issue_time = issue_time
        # certificate icon (unused)
        self.issue_icon = issue_icon

    def validate(self):
        # All fields except the (unused) icon are mandatory.
        for attr in (
            'issue_id',
            'issue_cert_type',
            'issue_cert_type_desc',
            'issue_time',
        ):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        return {
            attr: getattr(self, attr)
            for attr in (
                'issue_id',
                'issue_cert_type',
                'issue_cert_type_desc',
                'issue_time',
                'issue_icon',
            )
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in (
            'issue_id',
            'issue_cert_type',
            'issue_cert_type_desc',
            'issue_time',
            'issue_icon',
        ):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CpfBaseInfoQueryExtendParams(TeaModel):
    """Extended query parameters for a CPF base-information lookup."""

    def __init__(
        self,
        zjbzxbm: str = None,
        yhlsh: str = None,
        xingming: str = None,
        zjhm: str = None,
        appid: str = None,
    ):
        # MOHURD (housing ministry) center code
        self.zjbzxbm = zjbzxbm
        # business serial number
        self.yhlsh = yhlsh
        # full name
        self.xingming = xingming
        # ID document number
        self.zjhm = zjhm
        # bank application identifier
        self.appid = appid

    def validate(self):
        # Every field is mandatory.
        for attr in ('zjbzxbm', 'yhlsh', 'xingming', 'zjhm', 'appid'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        # Serialize set fields; wire keys match attribute names.
        return {
            attr: getattr(self, attr)
            for attr in ('zjbzxbm', 'yhlsh', 'xingming', 'zjhm', 'appid')
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('zjbzxbm', 'yhlsh', 'xingming', 'zjhm', 'appid'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class AuthRecord(TeaModel):
    """An authorization grant record, including its agreement and chain anchor."""

    def __init__(
        self,
        authorized_name: str = None,
        auth_agreement: AuthAgreement = None,
        auth_code: str = None,
        auth_date: str = None,
        auth_status: str = None,
        chain_info: ChainInfo = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        extend_params: str = None,
        target_name: str = None,
        tee_data: str = None,
    ):
        # name of the authorized tenant:
        # ID-card number / unified social credit code
        self.authorized_name = authorized_name
        # authorization agreement rules
        self.auth_agreement = auth_agreement
        # authorization code
        self.auth_code = auth_code
        # authorization time
        self.auth_date = auth_date
        # authorization validity status:
        #   1 - active
        #   0 - expired
        self.auth_status = auth_status
        # blockchain transaction info
        self.chain_info = chain_info
        # identity of the granting tenant:
        # ID-card number / unified social credit code
        self.data_owner_identity = data_owner_identity
        # name of the granting user
        self.data_owner_name = data_owner_name
        # extension field; combined with target_name to form the visible
        # authorization-detail name
        self.extend_params = extend_params
        # name of the subject matter
        self.target_name = target_name
        # trusted (TEE) authorization data
        self.tee_data = tee_data

    def validate(self):
        # tee_data is the only optional field; checks run in declaration order.
        self.validate_required(self.authorized_name, 'authorized_name')
        self.validate_required(self.auth_agreement, 'auth_agreement')
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.auth_code, 'auth_code')
        self.validate_required(self.auth_date, 'auth_date')
        self.validate_required(self.auth_status, 'auth_status')
        self.validate_required(self.chain_info, 'chain_info')
        if self.chain_info:
            self.chain_info.validate()
        self.validate_required(self.data_owner_identity, 'data_owner_identity')
        self.validate_required(self.data_owner_name, 'data_owner_name')
        self.validate_required(self.extend_params, 'extend_params')
        self.validate_required(self.target_name, 'target_name')

    def to_map(self):
        result = {}
        if self.authorized_name is not None:
            result['authorized_name'] = self.authorized_name
        # Nested models are serialized recursively.
        if self.auth_agreement is not None:
            result['auth_agreement'] = self.auth_agreement.to_map()
        for attr in ('auth_code', 'auth_date', 'auth_status'):
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        if self.chain_info is not None:
            result['chain_info'] = self.chain_info.to_map()
        for attr in (
            'data_owner_identity',
            'data_owner_name',
            'extend_params',
            'target_name',
            'tee_data',
        ):
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('authorized_name') is not None:
            self.authorized_name = m.get('authorized_name')
        # Nested models are deserialized recursively.
        if m.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
        for attr in ('auth_code', 'auth_date', 'auth_status'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        if m.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(m['chain_info'])
        for attr in (
            'data_owner_identity',
            'data_owner_name',
            'extend_params',
            'target_name',
            'tee_data',
        ):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CpfUserAccountInfo(TeaModel):
    """A user's CPF account: status, balance, employer and account number."""

    def __init__(
        self,
        account_status: str = None,
        balance: str = None,
        inst_name: str = None,
        account_id: str = None,
    ):
        # account status
        self.account_status = account_status
        # account balance
        self.balance = balance
        # name of the contributing employer
        self.inst_name = inst_name
        # personal account number
        self.account_id = account_id

    def validate(self):
        # Every field is mandatory.
        for attr in ('account_status', 'balance', 'inst_name', 'account_id'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        # Serialize set fields; wire keys match attribute names.
        return {
            attr: getattr(self, attr)
            for attr in ('account_status', 'balance', 'inst_name', 'account_id')
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in ('account_status', 'balance', 'inst_name', 'account_id'):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class CertUsageLogVO(TeaModel):
    """One entry in a certificate's usage log."""

    # Field order shared by validate/to_map/from_map; wire keys equal attrs.
    def __init__(
        self,
        user_name: str = None,
        usage_time: str = None,
        issue_cert_type: str = None,
        issue_cert_type_desc: str = None,
        issue_time: str = None,
        purpose: str = None,
        biz_id: str = None,
    ):
        # name of the consuming party
        self.user_name = user_name
        # time of use
        self.usage_time = usage_time
        # certificate type
        self.issue_cert_type = issue_cert_type
        # certificate type description
        self.issue_cert_type_desc = issue_cert_type_desc
        # time the certificate was issued
        self.issue_time = issue_time
        # purpose of use
        self.purpose = purpose
        # business serial number
        self.biz_id = biz_id

    def validate(self):
        # Every field is mandatory.
        for attr in (
            'user_name',
            'usage_time',
            'issue_cert_type',
            'issue_cert_type_desc',
            'issue_time',
            'purpose',
            'biz_id',
        ):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        return {
            attr: getattr(self, attr)
            for attr in (
                'user_name',
                'usage_time',
                'issue_cert_type',
                'issue_cert_type_desc',
                'issue_time',
                'purpose',
                'biz_id',
            )
            if getattr(self, attr) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr in (
            'user_name',
            'usage_time',
            'issue_cert_type',
            'issue_cert_type_desc',
            'issue_time',
            'purpose',
            'biz_id',
        ):
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class IssueCertInfo(TeaModel):
    """Details of an issued deposit certificate and its file locations."""

    # Wire keys match attribute names exactly.
    _FIELD_KEYS = (
        'issue_id', 'issue_time', 'provider_name', 'issue_cert_type',
        'deposit_cert_tag', 'deposit_cert', 'deposit_cert_pdf',
        'deposit_cert_pdf_qr',
    )
    # Only this subset must be present.
    _REQUIRED_KEYS = ('issue_id', 'issue_time', 'issue_cert_type')

    def __init__(
        self,
        issue_id: str = None,
        issue_time: str = None,
        provider_name: str = None,
        issue_cert_type: str = None,
        deposit_cert_tag: str = None,
        deposit_cert: str = None,
        deposit_cert_pdf: str = None,
        deposit_cert_pdf_qr: str = None,
    ):
        # Certificate file ID.
        self.issue_id = issue_id
        # Time the certificate was issued.
        self.issue_time = issue_time
        # Issuing organization.
        self.provider_name = provider_name
        # Certificate type.
        self.issue_cert_type = issue_cert_type
        # Certificate title/header.
        self.deposit_cert_tag = deposit_cert_tag
        # Storage address of the certificate PNG image.
        self.deposit_cert = deposit_cert
        # Retrieval address of the certificate PDF (without QR code).
        self.deposit_cert_pdf = deposit_cert_pdf
        # Retrieval address of the certificate PDF (with verification code).
        self.deposit_cert_pdf_qr = deposit_cert_pdf_qr

    def validate(self):
        for key in self._REQUIRED_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class AuthProperty(TeaModel):
    """An authorization property: a key plus a list of string values."""

    def __init__(
        self,
        key: str = None,
        value: List[str] = None,
    ):
        # Property name, e.g. a region.
        self.key = key
        # Property values, e.g. {"Shanxi", "Guangdong"}.
        self.value = value

    def validate(self):
        self.validate_required(self.key, 'key')
        self.validate_required(self.value, 'value')

    def to_map(self):
        # Emit only the attributes that are set.
        return {name: val
                for name, val in (('key', self.key), ('value', self.value))
                if val is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('key', 'value'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CpfUserLoanInfo(TeaModel):
    """Provident-fund loan record for a user (and spouse, if any)."""

    # Wire keys match attribute names; every field is required.
    _FIELD_KEYS = (
        'user_id', 'user_name', 'po_id', 'po_name',
        'loan_id', 'loan_balance', 'loan_status',
    )

    def __init__(
        self,
        user_id: str = None,
        user_name: str = None,
        po_id: str = None,
        po_name: str = None,
        loan_id: str = None,
        loan_balance: str = None,
        loan_status: str = None,
    ):
        # User ID-card number.
        self.user_id = user_id
        # Upstream comment labels this "ID type" despite the name — TODO confirm.
        self.user_name = user_name
        # Spouse ID-card number.
        self.po_id = po_id
        # Spouse name.
        self.po_name = po_name
        # Loan contract number.
        self.loan_id = loan_id
        # Loan balance.
        self.loan_balance = loan_balance
        # Loan contract status.
        self.loan_status = loan_status

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CpfYddkjczmQueryExtendParams(TeaModel):
    """Extended query parameters for the cross-region deposit-certificate lookup."""

    # Wire keys match attribute names; all fields are required.
    _FIELD_KEYS = ('zjbzxbm', 'xingming', 'zjhm')

    def __init__(
        self,
        zjbzxbm: str = None,
        xingming: str = None,
        zjhm: str = None,
    ):
        # MOHURD (Ministry of Housing) center code.
        self.zjbzxbm = zjbzxbm
        # Name.
        self.xingming = xingming
        # ID-card number.
        self.zjhm = zjhm

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CertificationInfo(TeaModel):
    """Real-person certification state: authorization flag and certify ID."""

    _FIELD_KEYS = ('certification_flag', 'certify_id')

    def __init__(
        self,
        certification_flag: bool = None,
        certify_id: str = None,
    ):
        # Whether authorization was granted.
        self.certification_flag = certification_flag
        # Unique identifier of the real-person certification.
        self.certify_id = certify_id

    def validate(self):
        # No required fields on this model.
        pass

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class IssueCertParams(TeaModel):
    """Parameters for issuing a provident-fund loan certificate."""

    # Wire keys match attribute names; none are required.
    _FIELD_KEYS = (
        'dkhtbh', 'grzh', 'ksrq', 'jsrq', 'gjjdkzx', 'dkje', 'dkqx',
    )

    def __init__(
        self,
        dkhtbh: str = None,
        grzh: str = None,
        ksrq: str = None,
        jsrq: str = None,
        gjjdkzx: str = None,
        dkje: str = None,
        dkqx: str = None,
    ):
        # Loan contract number.
        self.dkhtbh = dkhtbh
        # Personal account at the provident-fund center.
        self.grzh = grzh
        # Query start date.
        self.ksrq = ksrq
        # Query end date.
        self.jsrq = jsrq
        # Provident-fund loan center.
        self.gjjdkzx = gjjdkzx
        # Loan amount, unit: 10,000 CNY.
        self.dkje = dkje
        # Loan term, unit: years.
        self.dkqx = dkqx

    def validate(self):
        # No required fields on this model.
        pass

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class TdmCpfProvinceVO(TeaModel):
    """A province together with its provident-fund centers."""

    def __init__(
        self,
        code: str = None,
        name: str = None,
        areas: List[TdmCpfCitysVO] = None,
    ):
        # Province code.
        self.code = code
        # Province name.
        self.name = name
        # List of provident-fund centers.
        self.areas = areas

    def validate(self):
        self.validate_required(self.code, 'code')
        self.validate_required(self.name, 'name')
        self.validate_required(self.areas, 'areas')
        # Recursively validate each nested center model.
        for area in self.areas or []:
            if area:
                area.validate()

    def to_map(self):
        result = dict()
        if self.code is not None:
            result['code'] = self.code
        if self.name is not None:
            result['name'] = self.name
        # 'areas' is always emitted, even when there are no entries.
        result['areas'] = [a.to_map() if a else None
                           for a in (self.areas or [])]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('code') is not None:
            self.code = m.get('code')
        if m.get('name') is not None:
            self.name = m.get('name')
        # Always reset, then rebuild the nested models.
        self.areas = []
        for item in m.get('areas') or []:
            self.areas.append(TdmCpfCitysVO().from_map(item))
        return self
class CpfDataUseReqSign(TeaModel):
    """Institution signature attached to a data-use request."""

    # Wire keys match attribute names; all fields are required.
    _FIELD_KEYS = ('m_sy_app_id', 'm_sy_service', 'm_sy_sign')

    def __init__(
        self,
        m_sy_app_id: str = None,
        m_sy_service: str = None,
        m_sy_sign: str = None,
    ):
        # Institution signature ID.
        self.m_sy_app_id = m_sy_app_id
        # Signature service; must be issued/granted beforehand.
        self.m_sy_service = m_sy_service
        # Signature payload.
        self.m_sy_sign = m_sy_sign

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CertificationRequest(TeaModel):
    """Request carrying the certification (authentication) mode."""

    def __init__(
        self,
        biz_code: str = None,
    ):
        # Authentication mode.
        self.biz_code = biz_code

    def validate(self):
        self.validate_required(self.biz_code, 'biz_code')

    def to_map(self):
        # Emit the single attribute only when it is set.
        return {} if self.biz_code is None else {'biz_code': self.biz_code}

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('biz_code') is not None:
            self.biz_code = m.get('biz_code')
        return self
class CpfYdgjjdkxxQueryExtendParams(TeaModel):
    """Extended query parameters for the cross-region loan-info lookup."""

    # Wire keys match attribute names; all fields are required.
    _FIELD_KEYS = ('zjbzxbm', 'xingming', 'zjhm', 'dkhtbh')

    def __init__(
        self,
        zjbzxbm: str = None,
        xingming: str = None,
        zjhm: str = None,
        dkhtbh: str = None,
    ):
        # MOHURD (Ministry of Housing) center code.
        self.zjbzxbm = zjbzxbm
        # Name.
        self.xingming = xingming
        # ID-card number.
        self.zjhm = zjhm
        # Loan contract number.
        self.dkhtbh = dkhtbh

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CpfDataUsageLogVO(TeaModel):
    """Log entry describing one use of provident-fund data."""

    # Wire keys match attribute names; every field is required.
    _FIELD_KEYS = ('user_name', 'usage_time', 'data_desc', 'purpose', 'biz_id')

    def __init__(
        self,
        user_name: str = None,
        usage_time: str = None,
        data_desc: str = None,
        purpose: str = None,
        biz_id: str = None,
    ):
        # Name of the party that used the data.
        self.user_name = user_name
        # Time of use.
        self.usage_time = usage_time
        # Description of the data item.
        self.data_desc = data_desc
        # Purpose of use.
        self.purpose = purpose
        # Business serial number.
        self.biz_id = biz_id

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CertificationInitResponse(TeaModel):
    """Response of the identity-verification initialization call."""

    # Wire keys match attribute names; all fields are required.
    _FIELD_KEYS = ('certify_id', 'outer_order_no', 'scene_id')

    def __init__(
        self,
        certify_id: str = None,
        outer_order_no: str = None,
        scene_id: str = None,
    ):
        # Unique identifier of the identity verification.
        self.certify_id = certify_id
        # Merchant's unique request identifier.
        self.outer_order_no = outer_order_no
        # Scene ID.
        self.scene_id = scene_id

    def validate(self):
        for key in self._FIELD_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class TdmVerifyLogVO(TeaModel):
    """Identity-verification outcome: status code plus description."""

    def __init__(
        self,
        status: str = None,
        remark: str = None,
    ):
        # 1: verification created; 2: verification passed; 3: verification failed.
        self.status = status
        # Description of the verification result.
        self.remark = remark

    def validate(self):
        self.validate_required(self.status, 'status')
        self.validate_required(self.remark, 'remark')

    def to_map(self):
        # Emit only the attributes that are set.
        return {name: val
                for name, val in (('status', self.status), ('remark', self.remark))
                if val is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('status', 'remark'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class OpenCpfCertRequest(TeaModel):
    """Request to open (issue) a provident-fund certificate."""

    # Wire keys match attribute names exactly.
    _FIELD_KEYS = (
        'auth_token', 'product_instance_id', 'terminal_identity',
        'issue_cert_type', 'provider_id', 'data_owner_identity_type',
        'data_owner_identity', 'data_owner_name', 'extend_params',
    )
    # Only this subset must be present.
    _REQUIRED_KEYS = (
        'terminal_identity', 'issue_cert_type', 'provider_id',
        'data_owner_identity_type', 'data_owner_identity', 'data_owner_name',
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        issue_cert_type: str = None,
        provider_id: str = None,
        data_owner_identity_type: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        extend_params: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal (client) ID.
        self.terminal_identity = terminal_identity
        # Certificate type.
        self.issue_cert_type = issue_cert_type
        # Provident-fund center ID.
        self.provider_id = provider_id
        # ID-document type.
        self.data_owner_identity_type = data_owner_identity_type
        # Data owner's ID (ID-card number).
        self.data_owner_identity = data_owner_identity
        # Data owner's real name.
        self.data_owner_name = data_owner_name
        # Serialized request structure.
        self.extend_params = extend_params

    def validate(self):
        for key in self._REQUIRED_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class OpenCpfCertResponse(TeaModel):
    """Response of the certificate-open call."""

    # Wire keys match attribute names; none are required.
    _FIELD_KEYS = (
        'req_msg_id', 'result_code', 'result_msg', 'issue_id', 'deposit_cert',
    )

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        issue_id: str = None,
        deposit_cert: str = None,
    ):
        # Unique request ID for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Certificate file ID.
        self.issue_id = issue_id
        # Retrieval address of the certificate file.
        self.deposit_cert = deposit_cert

    def validate(self):
        # No required fields on this model.
        pass

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfCertRequest(TeaModel):
    """Paged request to list certificates issued to a data owner."""

    # Wire keys match attribute names exactly.
    _FIELD_KEYS = (
        'auth_token', 'product_instance_id', 'terminal_identity',
        'data_owner_identity', 'issue_cert_type', 'current_page',
        'page_size', 'option_time',
    )
    # Only this subset must be present.
    _REQUIRED_KEYS = ('terminal_identity', 'data_owner_identity', 'issue_cert_type')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        data_owner_identity: str = None,
        issue_cert_type: str = None,
        current_page: int = None,
        page_size: int = None,
        option_time: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal (client) ID.
        self.terminal_identity = terminal_identity
        # ID-card number.
        self.data_owner_identity = data_owner_identity
        # Certificate type.
        self.issue_cert_type = issue_cert_type
        # Current page number; defaults to 1.
        self.current_page = current_page
        # Items per page; defaults to 10.
        self.page_size = page_size
        # Time-filter enum; defaults to all.
        self.option_time = option_time

    def validate(self):
        for key in self._REQUIRED_KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfCertResponse(TeaModel):
    """Paged response carrying certificate summaries."""

    # Scalar wire keys that match attribute names; 'cert_list' is handled apart.
    _SCALAR_KEYS = (
        'req_msg_id', 'result_code', 'result_msg',
        'current', 'page_size', 'total',
    )

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        current: int = None,
        page_size: int = None,
        total: int = None,
        cert_list: List[CertSummary] = None,
    ):
        # Unique request ID for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Current page number.
        self.current = current
        # Items per page.
        self.page_size = page_size
        # Total number of records.
        self.total = total
        # List of certificate issuance summaries.
        self.cert_list = cert_list

    def validate(self):
        # Only the nested models require validation here.
        for cert in self.cert_list or []:
            if cert:
                cert.validate()

    def to_map(self):
        result = dict()
        for key in self._SCALAR_KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # 'cert_list' is always emitted, even when there are no entries.
        result['cert_list'] = [c.to_map() if c else None
                               for c in (self.cert_list or [])]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._SCALAR_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        # Always reset, then rebuild the nested models.
        self.cert_list = []
        for item in m.get('cert_list') or []:
            self.cert_list.append(CertSummary().from_map(item))
        return self
class CheckCpfCertRequest(TeaModel):
    """Request to verify (scan-check) an issued certificate."""

    # Wire keys match attribute names exactly.
    _FIELD_KEYS = (
        'auth_token', 'product_instance_id', 'biz_id', 'type',
        'terminal_identity', 'issue_id', 'data_owner_identity',
        'data_user_identity', 'data_user_name', 'purpose',
        'use_time', 'extend_params',
    )
    # Only this subset must be present.
    _REQUIRED_KEYS = ('type', 'terminal_identity', 'issue_id', 'data_owner_identity')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        type: str = None,
        terminal_identity: str = None,
        issue_id: str = None,
        data_owner_identity: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        purpose: str = None,
        use_time: str = None,
        extend_params: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # External business serial number.
        self.biz_id = biz_id
        # Scan-verification type.
        self.type = type
        # Terminal (client) ID.
        self.terminal_identity = terminal_identity
        # Certificate ID.
        self.issue_id = issue_id
        # ID-card number.
        self.data_owner_identity = data_owner_identity
        # Data user's ID.
        self.data_user_identity = data_user_identity
        # Data user's name.
        self.data_user_name = data_user_name
        # Purpose of use.
        self.purpose = purpose
        # Time of use (ISO-8601-like, see pattern in validate()).
        self.use_time = use_time
        # Extension field.
        self.extend_params = extend_params

    def validate(self):
        for key in self._REQUIRED_KEYS:
            self.validate_required(getattr(self, key), key)
        # Optional, but when present must match the timestamp format.
        if self.use_time is not None:
            self.validate_pattern(self.use_time, 'use_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CheckCpfCertResponse(TeaModel):
    """Response of the certificate-verification call."""

    # Wire keys match attribute names; none are required.
    _FIELD_KEYS = (
        'req_msg_id', 'result_code', 'result_msg', 'issue_id', 'deposit_cert',
    )

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        issue_id: str = None,
        deposit_cert: str = None,
    ):
        # Unique request ID for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Certificate ID.
        self.issue_id = issue_id
        # Retrieval address of the certificate file.
        self.deposit_cert = deposit_cert

    def validate(self):
        # No required fields on this model.
        pass

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SaveCpfCertuseRequest(TeaModel):
    """Request to record one certificate-usage event."""

    # Wire keys match attribute names exactly.
    _FIELD_KEYS = (
        'auth_token', 'product_instance_id', 'biz_id',
        'data_owner_identity', 'data_owner_name', 'data_user_identity',
        'data_user_name', 'terminal_identity', 'purpose',
        'operate_time', 'extend_params',
    )
    # Only this subset must be present.
    _REQUIRED_KEYS = (
        'biz_id', 'data_owner_identity', 'data_owner_name',
        'data_user_identity', 'data_user_name', 'terminal_identity',
        'purpose', 'operate_time',
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        terminal_identity: str = None,
        purpose: str = None,
        operate_time: str = None,
        extend_params: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business serial number.
        self.biz_id = biz_id
        # ID-card number.
        self.data_owner_identity = data_owner_identity
        # Name.
        self.data_owner_name = data_owner_name
        # Data-using institution ID.
        self.data_user_identity = data_user_identity
        # Data-using institution name.
        self.data_user_name = data_user_name
        # Data-using terminal ID.
        self.terminal_identity = terminal_identity
        # Purpose of data use.
        self.purpose = purpose
        # Time of data use (ISO-8601-like, see pattern in validate()).
        self.operate_time = operate_time
        # Extension field.
        self.extend_params = extend_params

    def validate(self):
        for key in self._REQUIRED_KEYS:
            self.validate_required(getattr(self, key), key)
        # Required above, and must also match the timestamp format.
        if self.operate_time is not None:
            self.validate_pattern(self.operate_time, 'operate_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SaveCpfCertuseResponse(TeaModel):
    """Response of the save-certificate-usage call."""

    # Wire keys match attribute names; none are required.
    _FIELD_KEYS = ('req_msg_id', 'result_code', 'result_msg')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique request ID for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg

    def validate(self):
        # No required fields on this model.
        pass

    def to_map(self):
        # Emit only the attributes that are set.
        return {key: getattr(self, key)
                for key in self._FIELD_KEYS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key in self._FIELD_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetCpfCertuseRequest(TeaModel):
    """Request model of the GetCpfCertuse operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        terminal_identity: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        issue_id: str = None,
        purpose: str = None,
        use_time: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business serial number
        self.biz_id = biz_id
        # Terminal ID
        self.terminal_identity = terminal_identity
        # Data-using organization ID
        self.data_user_identity = data_user_identity
        # Data-using organization name
        self.data_user_name = data_user_name
        # Certificate file ID
        self.issue_id = issue_id
        # Purpose of use
        self.purpose = purpose
        # Time of use
        self.use_time = use_time

    def validate(self):
        """Enforce required fields and the ISO-8601-like format of use_time."""
        for name in ('terminal_identity', 'data_user_identity', 'data_user_name', 'issue_id'):
            self.validate_required(getattr(self, name), name)
        if self.use_time is not None:
            self.validate_pattern(self.use_time, 'use_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in (
            'auth_token', 'product_instance_id', 'biz_id', 'terminal_identity',
            'data_user_identity', 'data_user_name', 'issue_id', 'purpose', 'use_time',
        ):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in (
            'auth_token', 'product_instance_id', 'biz_id', 'terminal_identity',
            'data_user_identity', 'data_user_name', 'issue_id', 'purpose', 'use_time',
        ):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class GetCpfCertuseResponse(TeaModel):
    """Response model of the GetCpfCertuse operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        deposit_cert: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Original PDF file
        self.deposit_cert = deposit_cert

    def validate(self):
        """This model has no constraints to check."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'deposit_cert'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'deposit_cert'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class GetCpfCertRequest(TeaModel):
    """Request model of the GetCpfCert operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        issue_id: str = None,
        file_type: str = None,
        terminal_identity: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Certificate file ID
        self.issue_id = issue_id
        # Certificate file storage type
        self.file_type = file_type
        # Terminal ID
        self.terminal_identity = terminal_identity

    def validate(self):
        """Enforce the required fields."""
        for name in ('issue_id', 'file_type', 'terminal_identity'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('auth_token', 'product_instance_id', 'issue_id', 'file_type', 'terminal_identity'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'issue_id', 'file_type', 'terminal_identity'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class GetCpfCertResponse(TeaModel):
    """Response model of the GetCpfCert operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        issue_id: str = None,
        deposit_cert: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Certificate file ID
        self.issue_id = issue_id
        # URL from which the certificate file can be fetched
        self.deposit_cert = deposit_cert

    def validate(self):
        """This model has no constraints to check."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'issue_id', 'deposit_cert'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'issue_id', 'deposit_cert'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ListCpfCertuseRequest(TeaModel):
    """Request model of the ListCpfCertuse operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        issue_id: str = None,
        option_time: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal ID
        self.terminal_identity = terminal_identity
        # Certificate ID
        self.issue_id = issue_id
        # Filter time, selected by month
        self.option_time = option_time

    def validate(self):
        """Enforce required fields and the timestamp format of option_time."""
        for name in ('terminal_identity', 'issue_id'):
            self.validate_required(getattr(self, name), name)
        if self.option_time is not None:
            self.validate_pattern(self.option_time, 'option_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('auth_token', 'product_instance_id', 'terminal_identity', 'issue_id', 'option_time'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'terminal_identity', 'issue_id', 'option_time'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ListCpfCertuseResponse(TeaModel):
    """Response model of the ListCpfCertuse operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        use_records: List[CertUsageLogVO] = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # List of certificate-usage records
        self.use_records = use_records

    def validate(self):
        """Validate each non-empty nested usage record."""
        for record in self.use_records or []:
            if record:
                record.validate()

    def to_map(self):
        """Serialize to a dict; scalar None fields are omitted, use_records is always present."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        if self.use_records is not None:
            mapping['use_records'] = [k.to_map() if k else None for k in self.use_records]
        else:
            mapping['use_records'] = []
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict; use_records is rebuilt from nested maps."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        self.use_records = []
        if m.get('use_records') is not None:
            self.use_records = [
                CertUsageLogVO().from_map(entry)
                for entry in m.get('use_records')
            ]
        return self
class GetCpfDataRequest(TeaModel):
    """Request model of the GetCpfData operation."""

    # Scalar fields, in wire order; req_sign is a nested model handled separately.
    _SCALARS = (
        'auth_token', 'product_instance_id', 'terminal_identity', 'biz_id',
        'data_user_identity', 'data_user_name', 'data_owner_identity_type',
        'data_owner_identity', 'data_owner_name', 'provider_id', 'data_code',
        'target_code', 'extend_params',
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        biz_id: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        data_owner_identity_type: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        provider_id: str = None,
        data_code: str = None,
        target_code: str = None,
        extend_params: str = None,
        req_sign: CpfDataUseReqSign = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal ID
        self.terminal_identity = terminal_identity
        # Business serial number
        self.biz_id = biz_id
        # Data user ID
        self.data_user_identity = data_user_identity
        # Data user name
        self.data_user_name = data_user_name
        # Identity document type
        self.data_owner_identity_type = data_owner_identity_type
        # User ID
        self.data_owner_identity = data_owner_identity
        # User name
        self.data_owner_name = data_owner_name
        # Data provider ID
        self.provider_id = provider_id
        # Data item code
        self.data_code = data_code
        # Authorization code
        self.target_code = target_code
        # Extension parameters.
        self.extend_params = extend_params
        # Signature info of the data-use request
        self.req_sign = req_sign

    def validate(self):
        """Enforce required fields, then validate the nested signature model."""
        for name in (
            'data_user_identity', 'data_user_name', 'data_owner_identity_type',
            'data_owner_identity', 'data_owner_name', 'provider_id',
            'data_code', 'target_code',
        ):
            self.validate_required(getattr(self, name), name)
        if self.req_sign:
            self.req_sign.validate()

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in self._SCALARS:
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        if self.req_sign is not None:
            mapping['req_sign'] = self.req_sign.to_map()
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict; req_sign is rebuilt from its nested map."""
        m = m or dict()
        for name in self._SCALARS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        if m.get('req_sign') is not None:
            self.req_sign = CpfDataUseReqSign().from_map(m['req_sign'])
        return self
class GetCpfDataResponse(TeaModel):
    """Response model of the GetCpfData operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        trust_data: str = None,
        data_hash: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # PDF, structured information, or encrypted data
        self.trust_data = trust_data
        # Data hash, used when verifying data authenticity
        self.data_hash = data_hash

    def validate(self):
        """This model has no constraints to check."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data', 'data_hash'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data', 'data_hash'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ListCpfDatauseRequest(TeaModel):
    """Request model of the ListCpfDatause operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        data_user_identity: str = None,
        data_owner_identity: str = None,
        option_time: str = None,
        type: str = None,
        data_code: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal ID
        self.terminal_identity = terminal_identity
        # Data user ID
        self.data_user_identity = data_user_identity
        # User ID-card number
        self.data_owner_identity = data_owner_identity
        # Filter time by month; defaults to the current month
        self.option_time = option_time
        # Data type
        self.type = type
        # Data type code
        self.data_code = data_code

    def validate(self):
        """Enforce the single required field."""
        self.validate_required(self.data_user_identity, 'data_user_identity')

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in (
            'auth_token', 'product_instance_id', 'terminal_identity',
            'data_user_identity', 'data_owner_identity', 'option_time',
            'type', 'data_code',
        ):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in (
            'auth_token', 'product_instance_id', 'terminal_identity',
            'data_user_identity', 'data_owner_identity', 'option_time',
            'type', 'data_code',
        ):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ListCpfDatauseResponse(TeaModel):
    """Response model of the ListCpfDatause operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        use_records: List[CpfDataUsageLogVO] = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Organization data-usage records
        self.use_records = use_records

    def validate(self):
        """Validate each non-empty nested usage record."""
        for record in self.use_records or []:
            if record:
                record.validate()

    def to_map(self):
        """Serialize to a dict; scalar None fields are omitted, use_records is always present."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        if self.use_records is not None:
            mapping['use_records'] = [k.to_map() if k else None for k in self.use_records]
        else:
            mapping['use_records'] = []
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict; use_records is rebuilt from nested maps."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        self.use_records = []
        if m.get('use_records') is not None:
            self.use_records = [
                CpfDataUsageLogVO().from_map(entry)
                for entry in m.get('use_records')
            ]
        return self
class QueryCpfUserRequest(TeaModel):
    """Request model of the QueryCpfUser operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        provider_id: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        data_owner_identity_type: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal ID
        self.terminal_identity = terminal_identity
        # Data provider ID
        self.provider_id = provider_id
        # User ID-card number
        self.data_owner_identity = data_owner_identity
        # User name
        self.data_owner_name = data_owner_name
        # Identity document type
        self.data_owner_identity_type = data_owner_identity_type

    def validate(self):
        """Enforce the required fields."""
        for name in (
            'terminal_identity', 'provider_id', 'data_owner_identity',
            'data_owner_name', 'data_owner_identity_type',
        ):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in (
            'auth_token', 'product_instance_id', 'terminal_identity', 'provider_id',
            'data_owner_identity', 'data_owner_name', 'data_owner_identity_type',
        ):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in (
            'auth_token', 'product_instance_id', 'terminal_identity', 'provider_id',
            'data_owner_identity', 'data_owner_name', 'data_owner_identity_type',
        ):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryCpfUserResponse(TeaModel):
    """Response model of the QueryCpfUser operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        user_account_info: List[CpfUserAccountInfo] = None,
        user_loan_info: List[CpfUserLoanInfo] = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Personal account information
        self.user_account_info = user_account_info
        # Loan information
        self.user_loan_info = user_loan_info

    def validate(self):
        """Validate every non-empty nested record in both lists."""
        for record in self.user_account_info or []:
            if record:
                record.validate()
        for record in self.user_loan_info or []:
            if record:
                record.validate()

    def to_map(self):
        """Serialize to a dict; scalar None fields are omitted, list keys are always present."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        if self.user_account_info is not None:
            mapping['user_account_info'] = [k.to_map() if k else None for k in self.user_account_info]
        else:
            mapping['user_account_info'] = []
        if self.user_loan_info is not None:
            mapping['user_loan_info'] = [k.to_map() if k else None for k in self.user_loan_info]
        else:
            mapping['user_loan_info'] = []
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict; both list fields are rebuilt from nested maps."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        self.user_account_info = []
        if m.get('user_account_info') is not None:
            self.user_account_info = [
                CpfUserAccountInfo().from_map(entry)
                for entry in m.get('user_account_info')
            ]
        self.user_loan_info = []
        if m.get('user_loan_info') is not None:
            self.user_loan_info = [
                CpfUserLoanInfo().from_map(entry)
                for entry in m.get('user_loan_info')
            ]
        return self
class ExecCpfAuthRequest(TeaModel):
    """Request model of the ExecCpfAuth operation."""

    # Leading scalar fields in wire order; the nested models and trailing
    # 'content' field are handled explicitly in to_map/from_map.
    _LEADING_SCALARS = (
        'auth_token', 'product_instance_id', 'request_id', 'data_owner_identity',
        'data_owner_name', 'authorized_identity', 'authorized_name',
        'authorized_platform_identity', 'target_code',
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        authorized_identity: str = None,
        authorized_name: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        certification_info: CertificationInfo = None,
        content: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Serial number
        self.request_id = request_id
        # User ID-card number
        self.data_owner_identity = data_owner_identity
        # User name
        self.data_owner_name = data_owner_name
        # Authorized organization ID
        self.authorized_identity = authorized_identity
        # Authorized organization name
        self.authorized_name = authorized_name
        # Terminal ID
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization target
        self.target_code = target_code
        # Authorization agreement
        self.auth_agreement = auth_agreement
        # Identity verification info
        self.certification_info = certification_info
        # Extension field
        self.content = content

    def validate(self):
        """Enforce required fields, then validate the nested models."""
        for name in ('request_id', 'data_owner_identity', 'authorized_identity', 'target_code', 'auth_agreement'):
            self.validate_required(getattr(self, name), name)
        if self.auth_agreement:
            self.auth_agreement.validate()
        if self.certification_info:
            self.certification_info.validate()

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in self._LEADING_SCALARS:
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        if self.auth_agreement is not None:
            mapping['auth_agreement'] = self.auth_agreement.to_map()
        if self.certification_info is not None:
            mapping['certification_info'] = self.certification_info.to_map()
        if self.content is not None:
            mapping['content'] = self.content
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict; nested models are rebuilt from their sub-maps."""
        m = m or dict()
        for name in self._LEADING_SCALARS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        if m.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
        if m.get('certification_info') is not None:
            self.certification_info = CertificationInfo().from_map(m['certification_info'])
        if m.get('content') is not None:
            self.content = m.get('content')
        return self
class ExecCpfAuthResponse(TeaModel):
    """Response model of the ExecCpfAuth operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_code: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Authorization code
        self.auth_code = auth_code

    def validate(self):
        """This model has no constraints to check."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CancelCpfAuthRequest(TeaModel):
    """Request model of the CancelCpfAuth operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        authorized_platform_identity: str = None,
        auth_code: str = None,
        certification_info: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Serial number
        self.request_id = request_id
        # ID-card number
        self.data_owner_identity = data_owner_identity
        # Terminal ID
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization code returned by the authorize API
        self.auth_code = auth_code
        # Identity verification info
        self.certification_info = certification_info

    def validate(self):
        """Enforce the required fields."""
        for name in ('request_id', 'data_owner_identity', 'auth_code'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in (
            'auth_token', 'product_instance_id', 'request_id', 'data_owner_identity',
            'authorized_platform_identity', 'auth_code', 'certification_info',
        ):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in (
            'auth_token', 'product_instance_id', 'request_id', 'data_owner_identity',
            'authorized_platform_identity', 'auth_code', 'certification_info',
        ):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CancelCpfAuthResponse(TeaModel):
    """Response model of the CancelCpfAuth operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK normally indicates a successful call
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg

    def validate(self):
        """This model has no constraints to check."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryCpfAuthRequest(TeaModel):
    """Request model of the QueryCpfAuth operation."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        data_owner_identity: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        extend_params: str = None,
        auth_state: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # ID-card number
        self.data_owner_identity = data_owner_identity
        # Authorized organization ID
        self.authorized_identity = authorized_identity
        # Terminal ID
        self.authorized_platform_identity = authorized_platform_identity
        # Target product code
        self.target_code = target_code
        # Extension field
        self.extend_params = extend_params
        # Authorization state
        self.auth_state = auth_state

    def validate(self):
        """Enforce the required fields."""
        for name in ('data_owner_identity', 'auth_state'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        mapping = dict()
        for name in (
            'auth_token', 'product_instance_id', 'data_owner_identity',
            'authorized_identity', 'authorized_platform_identity',
            'target_code', 'extend_params', 'auth_state',
        ):
            value = getattr(self, name)
            if value is not None:
                mapping[name] = value
        return mapping

    def from_map(self, m: dict = None):
        """Populate from a dict, skipping absent or None entries."""
        m = m or dict()
        for name in (
            'auth_token', 'product_instance_id', 'data_owner_identity',
            'authorized_identity', 'authorized_platform_identity',
            'target_code', 'extend_params', 'auth_state',
        ):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryCpfAuthResponse(TeaModel):
    """Response carrying the CPF authorization records matching a query."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_records: List[AuthRecord] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # List of authorization records.
        self.auth_records = auth_records

    def validate(self):
        # Cascade validation into every non-empty record.
        for record in self.auth_records or []:
            if record:
                record.validate()

    def to_map(self):
        """Serialize to a dict; 'auth_records' is always present, even if empty."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['auth_records'] = []
        if self.auth_records is not None:
            result['auth_records'] = [r.to_map() if r else None for r in self.auth_records]
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds each record as an AuthRecord."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.auth_records = []
        if m.get('auth_records') is not None:
            self.auth_records = [AuthRecord().from_map(r) for r in m.get('auth_records')]
        return self
class VerifyCpfAuthRequest(TeaModel):
    """Request for verifying a CPF authorization against an agreement."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        content: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business serial number.
        self.request_id = request_id
        # National ID-card number of the user (data owner).
        self.data_owner_identity = data_owner_identity
        # ID of the authorizing institution.
        self.authorized_identity = authorized_identity
        # ID of the authorizing terminal.
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization business code.
        self.target_code = target_code
        # Authorization agreement.
        self.auth_agreement = auth_agreement
        # Extension parameters.
        self.content = content

    def validate(self):
        # Required scalars (checked in wire order), then the nested agreement,
        # then the trailing required content string.
        for value, name in (
            (self.request_id, 'request_id'),
            (self.data_owner_identity, 'data_owner_identity'),
            (self.authorized_identity, 'authorized_identity'),
            (self.target_code, 'target_code'),
            (self.auth_agreement, 'auth_agreement'),
        ):
            self.validate_required(value, name)
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.content, 'content')

    def to_map(self):
        """Serialize non-None attributes; the agreement is nested via to_map()."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'request_id',
            'data_owner_identity', 'authorized_identity',
            'authorized_platform_identity', 'target_code',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.auth_agreement is not None:
            result['auth_agreement'] = self.auth_agreement.to_map()
        if self.content is not None:
            result['content'] = self.content
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested AuthAgreement."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'request_id',
            'data_owner_identity', 'authorized_identity',
            'authorized_platform_identity', 'target_code',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
        if m.get('content') is not None:
            self.content = m.get('content')
        return self
class VerifyCpfAuthResponse(TeaModel):
    """Response reporting whether the CPF authorization holds."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        if_auth: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the authorization is in effect.
        self.if_auth = if_auth

    def validate(self):
        # No required fields.
        pass

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'if_auth'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'if_auth'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SaveCpfDatauseRequest(TeaModel):
    """Request for recording (depositing evidence of) a CPF data usage."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        biz_id: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        data_owner_identity_type: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        provider_id: str = None,
        data_code: str = None,
        target_code: str = None,
        data_hash: str = None,
        extend_params: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Terminal ID.
        self.terminal_identity = terminal_identity
        # Business serial number.
        self.biz_id = biz_id
        # ID of the data user.
        self.data_user_identity = data_user_identity
        # Name of the data user.
        self.data_user_name = data_user_name
        # Certificate (ID document) type.
        self.data_owner_identity_type = data_owner_identity_type
        # ID of the data owner.
        self.data_owner_identity = data_owner_identity
        # Name of the data owner.
        self.data_owner_name = data_owner_name
        # ID of the data provider.
        self.provider_id = provider_id
        # Code of the data item.
        self.data_code = data_code
        # Authorization code.
        self.target_code = target_code
        # Hash of the deposited evidence data.
        self.data_hash = data_hash
        # Extension parameters.
        self.extend_params = extend_params

    # Serializable attributes in wire order (shared by to_map/from_map).
    def _keys(self):
        return (
            'auth_token', 'product_instance_id', 'terminal_identity', 'biz_id',
            'data_user_identity', 'data_user_name', 'data_owner_identity_type',
            'data_owner_identity', 'data_owner_name', 'provider_id',
            'data_code', 'target_code', 'data_hash', 'extend_params',
        )

    def validate(self):
        # Everything except the token, instance id and extension field is required.
        for name in self._keys()[2:-1]:
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in self._keys():
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in self._keys():
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SaveCpfDatauseResponse(TeaModel):
    """Response for a CPF data-usage deposit, carrying the on-chain info."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        chain_info: ChainInfo = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Blockchain information.
        self.chain_info = chain_info

    def validate(self):
        # Cascade validation into the nested chain info when present.
        if self.chain_info:
            self.chain_info.validate()

    def to_map(self):
        """Serialize non-None attributes; chain_info is nested via to_map()."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.chain_info is not None:
            result['chain_info'] = self.chain_info.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested ChainInfo."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(m['chain_info'])
        return self
class ConfirmCpfDatauseRequest(TeaModel):
    """Request for confirming a CPF data usage by its blockchain transaction."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        tx_hash: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # ID of the verifying terminal.
        self.terminal_identity = terminal_identity
        # Blockchain transaction hash.
        self.tx_hash = tx_hash

    def validate(self):
        # Both the terminal id and the transaction hash are mandatory.
        for value, name in (
            (self.terminal_identity, 'terminal_identity'),
            (self.tx_hash, 'tx_hash'),
        ):
            self.validate_required(value, name)

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'terminal_identity', 'tx_hash'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'terminal_identity', 'tx_hash'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ConfirmCpfDatauseResponse(TeaModel):
    """Response describing a confirmed CPF data usage and its chain record."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        provider_id: str = None,
        provider_name: str = None,
        terminal_identity: str = None,
        data_desc: str = None,
        data_hash: str = None,
        chain_info: ChainInfo = None,
        extend_params: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # ID of the data user.
        self.data_user_identity = data_user_identity
        # Name of the data user.
        self.data_user_name = data_user_name
        # ID of the data owner (the user's ID-card number).
        self.data_owner_identity = data_owner_identity
        # Name of the data owner (the user's name).
        self.data_owner_name = data_owner_name
        # ID of the data provider.
        self.provider_id = provider_id
        # Name of the data provider.
        self.provider_name = provider_name
        # ID of the depositing terminal.
        self.terminal_identity = terminal_identity
        # Business description.
        self.data_desc = data_desc
        # Hash of the deposited evidence data.
        self.data_hash = data_hash
        # Chain information.
        self.chain_info = chain_info
        # Extension parameters.
        self.extend_params = extend_params

    # Scalar attributes in wire order (chain_info and extend_params follow).
    _SCALARS = (
        'req_msg_id', 'result_code', 'result_msg', 'data_user_identity',
        'data_user_name', 'data_owner_identity', 'data_owner_name',
        'provider_id', 'provider_name', 'terminal_identity',
        'data_desc', 'data_hash',
    )

    def validate(self):
        # Cascade validation into the nested chain info when present.
        if self.chain_info:
            self.chain_info.validate()

    def to_map(self):
        """Serialize non-None attributes; chain_info is nested via to_map()."""
        result = dict()
        for key in self._SCALARS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.chain_info is not None:
            result['chain_info'] = self.chain_info.to_map()
        if self.extend_params is not None:
            result['extend_params'] = self.extend_params
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested ChainInfo."""
        m = m or dict()
        for key in self._SCALARS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(m['chain_info'])
        if m.get('extend_params') is not None:
            self.extend_params = m.get('extend_params')
        return self
class CheckCpfAuthRequest(TeaModel):
    """Request for checking a CPF authorization.

    Similar to the verify request, except that ``content`` is a structured
    ``AuthProperty`` model rather than a plain string.
    """

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        content: AuthProperty = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Serial (flow) ID.
        self.request_id = request_id
        # ID of the user (data owner).
        self.data_owner_identity = data_owner_identity
        # ID of the authorizing institution.
        self.authorized_identity = authorized_identity
        # ID of the authorizing terminal.
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization business code.
        self.target_code = target_code
        # Authorization agreement.
        self.auth_agreement = auth_agreement
        # Extension properties.
        self.content = content

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.data_owner_identity, 'data_owner_identity')
        self.validate_required(self.authorized_identity, 'authorized_identity')
        self.validate_required(self.target_code, 'target_code')
        # NOTE(review): auth_agreement is only validated when present and is not
        # marked required here, unlike sibling request models — presumably
        # intentional in the API spec, but worth confirming.
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.content, 'content')
        if self.content:
            self.content.validate()

    def to_map(self):
        """Serialize non-None attributes; nested models via their to_map()."""
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.request_id is not None:
            result['request_id'] = self.request_id
        if self.data_owner_identity is not None:
            result['data_owner_identity'] = self.data_owner_identity
        if self.authorized_identity is not None:
            result['authorized_identity'] = self.authorized_identity
        if self.authorized_platform_identity is not None:
            result['authorized_platform_identity'] = self.authorized_platform_identity
        if self.target_code is not None:
            result['target_code'] = self.target_code
        if self.auth_agreement is not None:
            result['auth_agreement'] = self.auth_agreement.to_map()
        if self.content is not None:
            result['content'] = self.content.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested models."""
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('request_id') is not None:
            self.request_id = m.get('request_id')
        if m.get('data_owner_identity') is not None:
            self.data_owner_identity = m.get('data_owner_identity')
        if m.get('authorized_identity') is not None:
            self.authorized_identity = m.get('authorized_identity')
        if m.get('authorized_platform_identity') is not None:
            self.authorized_platform_identity = m.get('authorized_platform_identity')
        if m.get('target_code') is not None:
            self.target_code = m.get('target_code')
        if m.get('auth_agreement') is not None:
            temp_model = AuthAgreement()
            self.auth_agreement = temp_model.from_map(m['auth_agreement'])
        if m.get('content') is not None:
            temp_model = AuthProperty()
            self.content = temp_model.from_map(m['content'])
        return self
class CheckCpfAuthResponse(TeaModel):
    """Response reporting whether a CPF authorization exists."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        if_auth: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the user has authorized.
        self.if_auth = if_auth

    def validate(self):
        # No required fields.
        pass

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'if_auth'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'if_auth'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfSourceRequest(TeaModel):
    """Request for listing available provident-fund centers (no parameters)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id

    def validate(self):
        # No required fields.
        pass

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfSourceResponse(TeaModel):
    """Response listing the provident-fund centers available to institutions."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        cpf_list: List[TdmCpfProvinceVO] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Query result: list of provident-fund centers by province.
        self.cpf_list = cpf_list

    def validate(self):
        # Cascade validation into every non-empty entry.
        for entry in self.cpf_list or []:
            if entry:
                entry.validate()

    def to_map(self):
        """Serialize to a dict; 'cpf_list' is always present, even if empty."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['cpf_list'] = []
        if self.cpf_list is not None:
            result['cpf_list'] = [e.to_map() if e else None for e in self.cpf_list]
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds each TdmCpfProvinceVO entry."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.cpf_list = []
        if m.get('cpf_list') is not None:
            self.cpf_list = [TdmCpfProvinceVO().from_map(e) for e in m.get('cpf_list')]
        return self
class CreateCpfVerifyRequest(TeaModel):
    """Request for starting a trusted real-person verification for CPF."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        user_id: str = None,
        user_name: str = None,
        meta_info: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # User ID-card number (currently only ID cards are supported).
        self.user_id = user_id
        # User name.
        self.user_name = user_name
        # Environment info; must be collected via the client SDK.
        self.meta_info = meta_info

    def validate(self):
        # All business fields are mandatory.
        for value, name in (
            (self.user_id, 'user_id'),
            (self.user_name, 'user_name'),
            (self.meta_info, 'meta_info'),
        ):
            self.validate_required(value, name)

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'user_id', 'user_name', 'meta_info'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'user_id', 'user_name', 'meta_info'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CreateCpfVerifyResponse(TeaModel):
    """Response carrying the id of a newly created real-person verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        certify_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique id of the trusted real-person certification.
        self.certify_id = certify_id

    def validate(self):
        # No required fields.
        pass

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'certify_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'certify_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryCpfVerifyRequest(TeaModel):
    """Request for querying a real-person verification by its certify id."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certify_id: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique id of the trusted real-person certification.
        self.certify_id = certify_id

    def validate(self):
        # Only the certify id is mandatory.
        self.validate_required(self.certify_id, 'certify_id')

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'certify_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'certify_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryCpfVerifyResponse(TeaModel):
    """Response carrying the result of a real-person verification query."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        verify_vo: TdmVerifyLogVO = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Verification result.
        self.verify_vo = verify_vo

    def validate(self):
        # Cascade validation into the nested result when present.
        if self.verify_vo:
            self.verify_vo.validate()

    def to_map(self):
        """Serialize non-None attributes; verify_vo is nested via to_map()."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.verify_vo is not None:
            result['verify_vo'] = self.verify_vo.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested TdmVerifyLogVO."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('verify_vo') is not None:
            self.verify_vo = TdmVerifyLogVO().from_map(m['verify_vo'])
        return self
class RecognizeCpfAuthRequest(TeaModel):
    """Request for creating a CPF authorization backed by identity verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        authorized_identity: str = None,
        authorized_name: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        certification_info: CertificationInfo = None,
        content: str = None,
    ):
        # OAuth access token (used in OAuth mode).
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business serial number.
        self.request_id = request_id
        # National ID-card number of the user (data owner).
        self.data_owner_identity = data_owner_identity
        # User name.
        self.data_owner_name = data_owner_name
        # ID of the authorized institution.
        self.authorized_identity = authorized_identity
        # Name of the authorized institution.
        self.authorized_name = authorized_name
        # Authorization target code.
        self.target_code = target_code
        # Authorization agreement.
        self.auth_agreement = auth_agreement
        # Identity-verification information.
        self.certification_info = certification_info
        # Extension parameters.
        self.content = content

    def validate(self):
        # Required scalars first, then the required nested models, each of
        # which is also validated recursively.
        for value, name in (
            (self.request_id, 'request_id'),
            (self.data_owner_identity, 'data_owner_identity'),
            (self.authorized_identity, 'authorized_identity'),
            (self.target_code, 'target_code'),
            (self.auth_agreement, 'auth_agreement'),
        ):
            self.validate_required(value, name)
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.certification_info, 'certification_info')
        if self.certification_info:
            self.certification_info.validate()

    def to_map(self):
        """Serialize non-None attributes; nested models via their to_map()."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'request_id',
            'data_owner_identity', 'data_owner_name',
            'authorized_identity', 'authorized_name', 'target_code',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.auth_agreement is not None:
            result['auth_agreement'] = self.auth_agreement.to_map()
        if self.certification_info is not None:
            result['certification_info'] = self.certification_info.to_map()
        if self.content is not None:
            result['content'] = self.content
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*; rebuilds the nested models."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'request_id',
            'data_owner_identity', 'data_owner_name',
            'authorized_identity', 'authorized_name', 'target_code',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
        if m.get('certification_info') is not None:
            self.certification_info = CertificationInfo().from_map(m['certification_info'])
        if m.get('content') is not None:
            self.content = m.get('content')
        return self
class RecognizeCpfAuthResponse(TeaModel):
    """Response carrying the authorization code granted after recognition."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_code: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Authorization code.
        self.auth_code = auth_code

    def validate(self):
        # No required fields.
        pass

    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate attributes from *m*, skipping absent or None entries."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ExecAuthRequest(TeaModel):
    """Request payload for executing (granting) an authorization."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        auth_agreement: AuthAgreement = None,
        content: str = None,
        data_owner_identity: str = None,
        request_id: str = None,
        target_code: str = None,
        certification_type: str = None,
        certification_info: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Grantee tenant identity id:
        # national id number / unified social credit code.
        self.authorized_identity = authorized_identity
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # The authorization agreement (nested model).
        self.auth_agreement = auth_agreement
        # Extension field: a JSON string of extra info; unneeded keys may be
        # omitted, and the key/value dictionary is supplied by the platform.
        self.content = content
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Request serial number (64 bits, defined by the platform); idempotency key.
        self.request_id = request_id
        # Subject-matter (target) code.
        self.target_code = target_code
        # Identity-verification product type.
        self.certification_type = certification_type
        # Identity-verification info.
        self.certification_info = certification_info

    def validate(self):
        self.validate_required(self.authorized_identity, 'authorized_identity')
        self.validate_required(self.authorized_platform_identity, 'authorized_platform_identity')
        self.validate_required(self.auth_agreement, 'auth_agreement')
        if self.auth_agreement:
            self.auth_agreement.validate()
        for name in ('content', 'data_owner_identity', 'request_id', 'target_code'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'auth_agreement', 'content',
                     'data_owner_identity', 'request_id', 'target_code',
                     'certification_type', 'certification_info'):
            value = getattr(self, name)
            if value is None:
                continue
            # The nested model is serialized recursively; all others are plain.
            result[name] = value.to_map() if name == 'auth_agreement' else value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'auth_agreement', 'content',
                     'data_owner_identity', 'request_id', 'target_code',
                     'certification_type', 'certification_info'):
            if m.get(name) is None:
                continue
            if name == 'auth_agreement':
                self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
            else:
                setattr(self, name, m.get(name))
        return self
class ExecAuthResponse(TeaModel):
    """Response envelope returned by the execute-authorization API."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_code: str = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Authorization code.
        self.auth_code = auth_code

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CancelAuthRequest(TeaModel):
    """Request payload for cancelling a previously granted authorization."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        data_owner_identity: str = None,
        authorized_platform_identity: str = None,
        auth_code: str = None,
        certification_info: str = None,
        request_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization code, as returned by the query-authorization API.
        self.auth_code = auth_code
        # Identity-verification info.
        self.certification_info = certification_info
        # Request serial number; idempotency key.
        self.request_id = request_id

    def validate(self):
        for name in ('data_owner_identity', 'authorized_platform_identity',
                     'auth_code', 'certification_info', 'request_id'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'data_owner_identity',
                     'authorized_platform_identity', 'auth_code',
                     'certification_info', 'request_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'data_owner_identity',
                     'authorized_platform_identity', 'auth_code',
                     'certification_info', 'request_id'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class CancelAuthResponse(TeaModel):
    """Response envelope returned by the cancel-authorization API."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryAuthRequest(TeaModel):
    """Request payload for querying an authorization and its trust data."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        data_owner_identity: str = None,
        extend_params: str = None,
        target_code: str = None,
        request_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Grantee tenant identity id:
        # national id number / unified social credit code.
        self.authorized_identity = authorized_identity
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Extension params.
        self.extend_params = extend_params
        # Subject-matter (target) code.
        self.target_code = target_code
        # Request serial number; idempotency key.
        self.request_id = request_id

    def validate(self):
        for name in ('authorized_identity', 'authorized_platform_identity',
                     'data_owner_identity', 'extend_params', 'target_code',
                     'request_id'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'data_owner_identity',
                     'extend_params', 'target_code', 'request_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'data_owner_identity',
                     'extend_params', 'target_code', 'request_id'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryAuthResponse(TeaModel):
    """Response envelope returned by the query-authorization API."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        trust_data: str = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Deposit-certificate data (a JSON-formatted string).
        self.trust_data = trust_data

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryAuthOwnerRequest(TeaModel):
    """Request payload for listing authorizations from the data owner's side."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        data_owner_identity: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        extend_params: str = None,
        auth_state: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Grantee tenant identity id:
        # national id number / unified social credit code.
        self.authorized_identity = authorized_identity
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # Subject-matter (target) code.
        self.target_code = target_code
        # Extension params.
        self.extend_params = extend_params
        # Authorization state:
        # "1" - active, "2" - cancelled, "3" - expired.
        self.auth_state = auth_state

    def validate(self):
        for name in ('data_owner_identity', 'authorized_platform_identity',
                     'auth_state'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'data_owner_identity',
                     'authorized_identity', 'authorized_platform_identity',
                     'target_code', 'extend_params', 'auth_state'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'data_owner_identity',
                     'authorized_identity', 'authorized_platform_identity',
                     'target_code', 'extend_params', 'auth_state'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryAuthOwnerResponse(TeaModel):
    """Response envelope carrying the owner's authorization records."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_records: List[AuthRecord] = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Collection of authorization records.
        self.auth_records = auth_records

    def validate(self):
        for record in self.auth_records or []:
            if record:
                record.validate()

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        # The list key is always emitted, even when the list is unset.
        result['auth_records'] = []
        if self.auth_records is not None:
            for record in self.auth_records:
                result['auth_records'].append(record.to_map() if record else None)
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        # The list is always reset before being repopulated.
        self.auth_records = []
        if m.get('auth_records') is not None:
            for item in m.get('auth_records'):
                self.auth_records.append(AuthRecord().from_map(item))
        return self
class QueryAuthuseOwnerRequest(TeaModel):
    """Request payload for listing usage records of an authorization."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        authorized_platform_identity: str = None,
        auth_code: str = None,
        data_owner_identity: str = None,
        extend_params: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization code.
        self.auth_code = auth_code
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Extension params.
        self.extend_params = extend_params

    def validate(self):
        for name in ('authorized_platform_identity', 'auth_code',
                     'data_owner_identity'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id',
                     'authorized_platform_identity', 'auth_code',
                     'data_owner_identity', 'extend_params'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id',
                     'authorized_platform_identity', 'auth_code',
                     'data_owner_identity', 'extend_params'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryAuthuseOwnerResponse(TeaModel):
    """Response envelope carrying the authorization usage records."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        use_records: List[AuthUsedRecord] = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Authorization usage records.
        self.use_records = use_records

    def validate(self):
        for record in self.use_records or []:
            if record:
                record.validate()

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        # The list key is always emitted, even when the list is unset.
        result['use_records'] = []
        if self.use_records is not None:
            for record in self.use_records:
                result['use_records'].append(record.to_map() if record else None)
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        # The list is always reset before being repopulated.
        self.use_records = []
        if m.get('use_records') is not None:
            for item in m.get('use_records'):
                self.use_records.append(AuthUsedRecord().from_map(item))
        return self
class ExecAuthuseRequest(TeaModel):
    """Request payload for exercising (using) a granted authorization."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        data_owner_identity: str = None,
        extend_params: str = None,
        request_id: str = None,
        target_code: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Grantee tenant identity id:
        # national id number / unified social credit code.
        self.authorized_identity = authorized_identity
        # Platform-assigned code.
        self.authorized_platform_identity = authorized_platform_identity
        # Granting (data owner) tenant identity id:
        # national id number / unified social credit code.
        self.data_owner_identity = data_owner_identity
        # Extension params.
        self.extend_params = extend_params
        # Request serial number; idempotency key.
        self.request_id = request_id
        # Subject-matter (target) code.
        self.target_code = target_code

    def validate(self):
        for name in ('authorized_identity', 'authorized_platform_identity',
                     'data_owner_identity', 'extend_params', 'request_id',
                     'target_code'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'data_owner_identity',
                     'extend_params', 'request_id', 'target_code'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'authorized_identity',
                     'authorized_platform_identity', 'data_owner_identity',
                     'extend_params', 'request_id', 'target_code'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ExecAuthuseResponse(TeaModel):
    """Response envelope returned by the exercise-authorization API."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        trust_data: str = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Deposit-certificate data.
        self.trust_data = trust_data

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'trust_data'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class InitCpfVerifyRequest(TeaModel):
    """Request payload for initializing a CPF identity verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certification_request: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Identity-verification initialization request payload.
        self.certification_request = certification_request

    def validate(self):
        self.validate_required(self.certification_request, 'certification_request')

    def to_map(self):
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'certification_request'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'certification_request'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class InitCpfVerifyResponse(TeaModel):
    """Response envelope returned by the CPF verification-initialization API."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        result_obj: str = None,
    ):
        # Unique request id, used for call-chain tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Identity-verification initialization response payload.
        self.result_obj = result_obj

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'result_obj'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'result_obj'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
| 35.849031
| 168
| 0.601772
| 24,162
| 183,081
| 4.291615
| 0.020942
| 0.046194
| 0.083148
| 0.06741
| 0.892472
| 0.852836
| 0.832748
| 0.820067
| 0.807819
| 0.795003
| 0
| 0.000658
| 0.302347
| 183,081
| 5,106
| 169
| 35.856052
| 0.811183
| 0.02606
| 0
| 0.844595
| 1
| 0.000965
| 0.115491
| 0.011325
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079151
| false
| 0.005309
| 0.000483
| 0
| 0.138996
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ad1adddab3dcab908949e38a517dbd02c0a0f9d7
| 13,114
|
py
|
Python
|
apps/challenges/task_definitions.py
|
tdd-ai/Mukayese
|
f099486a01c12e5166d1d5f40913982471ad5bba
|
[
"BSD-3-Clause"
] | 2
|
2021-09-14T16:51:45.000Z
|
2021-12-23T19:55:47.000Z
|
apps/challenges/task_definitions.py
|
tdd-ai/Mukayese
|
f099486a01c12e5166d1d5f40913982471ad5bba
|
[
"BSD-3-Clause"
] | null | null | null |
apps/challenges/task_definitions.py
|
tdd-ai/Mukayese
|
f099486a01c12e5166d1d5f40913982471ad5bba
|
[
"BSD-3-Clause"
] | null | null | null |
# ECS (Fargate) task-definition template for a challenge's submission worker.
# NOTE(review): rendered with str.format() (hence the doubled braces). The
# result uses Python literals (True/False, trailing commas), so it is NOT
# valid JSON — presumably the caller eval()s it as a Python dict; confirm
# before attempting json.loads().
task_definition = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
# ECS task-definition template for a code-upload worker (single container).
# Same rendering caveat as task_definition: Python-literal syntax, not JSON.
task_definition_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
# Two-container task-definition shell: the {code_upload_container} and
# {submission_container} placeholders are filled with the pre-rendered
# container definitions below.
task_definition_static_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{code_upload_container},
{submission_container}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
# Container definition for the submission worker, used to fill the
# {submission_container} slot of task_definition_static_code_upload_worker.
# The environment list mirrors the one in task_definition above.
container_definition_submission_worker = """
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
"""
# Container definition for the code-upload worker, used to fill the
# {code_upload_container} slot of task_definition_static_code_upload_worker.
# The environment list mirrors task_definition_code_upload_worker above.
container_definition_code_upload_worker = """
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
"""
# ECS create-service argument template (Fargate, single task, public IP).
# NOTE(review): 'securityGroups' below is single-quoted unlike every other
# key — harmless if the string is eval()'d as a Python literal, but invalid
# as JSON; confirm the caller's parsing before normalizing.
service_definition = """
{{
"cluster":"{CLUSTER}",
"serviceName":"{service_name}",
"taskDefinition":"{task_def_arn}",
"desiredCount":1,
"clientToken":"{client_token}",
"launchType":"FARGATE",
"platformVersion":"LATEST",
"networkConfiguration":{{
"awsvpcConfiguration": {{
"subnets": [
"{SUBNET_1}",
"{SUBNET_2}",
],
'securityGroups': [
"{SUBNET_SECURITY_GROUP}",
],
"assignPublicIp": "ENABLED"
}}
}},
"schedulingStrategy":"REPLICA",
"deploymentController":{{
"type": "ECS"
}},
"deploymentConfiguration":{{
"deploymentCircuitBreaker":{{
"enable": True,
"rollback": False
}}
}}
}}
"""
# ECS update-service argument template.
# NOTE(review): "desiredCount" is the bare identifier num_of_tasks, not a
# {placeholder} — presumably the formatted string is eval()'d in a scope
# where num_of_tasks is bound; verify against the caller.
update_service_args = """
{{
"cluster":"{CLUSTER}",
"service":"{service_name}",
"desiredCount":num_of_tasks,
"taskDefinition":"{task_def_arn}",
"forceNewDeployment":{force_new_deployment}
}}
"""
# ECS delete-service argument template (non-forced deletion).
delete_service_args = """
{{
"cluster": "{CLUSTER}",
"service": "{service_name}",
"force": False
}}
"""
| 26.817996
| 60
| 0.377307
| 827
| 13,114
| 5.622733
| 0.151149
| 0.030108
| 0.020645
| 0.024086
| 0.867097
| 0.867097
| 0.856774
| 0.83828
| 0.83828
| 0.83828
| 0
| 0.001262
| 0.456001
| 13,114
| 488
| 61
| 26.872951
| 0.650547
| 0
| 0
| 0.930233
| 0
| 0
| 0.976361
| 0.176605
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.016913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a8e1be35826c0b5e31baa28aa502830019f7bf85
| 7,456
|
py
|
Python
|
tests/test_monitor_resources.py
|
CFMTech/monitor-server-api
|
51b927ae929ffec46c06a6d248969f6343ab5241
|
[
"MIT"
] | 3
|
2021-06-16T07:05:39.000Z
|
2021-06-16T18:48:04.000Z
|
tests/test_monitor_resources.py
|
CFMTech/monitor-server-api
|
51b927ae929ffec46c06a6d248969f6343ab5241
|
[
"MIT"
] | 2
|
2021-10-15T21:35:55.000Z
|
2021-10-19T11:16:58.000Z
|
tests/test_monitor_resources.py
|
CFMTech/monitor-server-api
|
51b927ae929ffec46c06a6d248969f6343ab5241
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Jean-Sébastien Dieu <jean-sebastien.dieu@cfm.fr>
#
# SPDX-License-Identifier: MIT
def test_list_head_resources_on_all_metrics(monitor, gen):
    """head/15 over all metrics must return the 15 largest memory values."""
    ctx, session = gen.new_context(), gen.new_session()
    # Seed 200 metrics whose usage forms a V shape: 10000 down to 0, back up to 9900.
    for step in range(200):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, item='this_item',
                           mem_usage=abs(100 - step) * 100))
    payload = monitor.client.get('/api/v1/resources/memory/head/15/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    # The endpoint's 15 results must be exactly the 15 highest generated values.
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [10000, 9900, 9900, 9800, 9800,
                        9700, 9700, 9600, 9600, 9500,
                        9500, 9400, 9400, 9300, 9300]
def test_list_tail_memory_on_all_metrics(monitor, gen):
    """tail/15 over all metrics must return the 15 smallest memory values."""
    ctx, session = gen.new_context(), gen.new_session()
    # Seed 200 metrics whose usage forms a V shape: 10000 down to 0, back up to 9900.
    for step in range(200):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, item='this_item',
                           mem_usage=abs(100 - step) * 100))
    payload = monitor.client.get('/api/v1/resources/memory/tail/15/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    # The endpoint's 15 results must be exactly the 15 lowest generated values.
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [700, 700, 600, 600, 500,
                        500, 400, 400, 300, 300,
                        200, 200, 100, 100, 0]
def test_list_head_memory_on_components(monitor, gen):
    """head/5 filtered on compB must ignore compA's (much larger) metrics."""
    ctx, session = gen.new_context(), gen.new_session()
    # compA carries large values (0..9900); compB carries small ones (0..99),
    # so any leakage across components would show up in the result set.
    for step in range(100):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, component="compA", item='this_item',
                           mem_usage=step * 100))
    for step in range(100):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, component="compB", item='this_item',
                           mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/components/compB/head/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [99, 98, 97, 96, 95]
    # Every returned metric must belong to the filtered component.
    for entry in payload["metrics"]:
        assert entry['component'] == 'compB'
def test_list_tail_memory_on_components(monitor, gen):
    """tail/5 filtered on compB must ignore compA's (much larger) metrics."""
    ctx, session = gen.new_context(), gen.new_session()
    # compA carries large values (1000..10900); compB carries small ones (0..99).
    for step in range(100):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, component="compA", item='this_item',
                           mem_usage=(step + 10) * 100))
    for step in range(100):
        monitor.post_metrics_v1(
            gen.new_metric(ctx, session, component="compB", item='this_item',
                           mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/components/compB/tail/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [4, 3, 2, 1, 0]
    # Every returned metric must belong to the filtered component.
    for entry in payload["metrics"]:
        assert entry['component'] == 'compB'
def test_list_head_memory_on_pipeline(monitor, gen):
    """head/5 filtered on pipeline2 must only see pipeline2's session metrics."""
    ctx = gen.new_context()
    sess_a = gen.new_session(pipeline_branch="pipeline1")
    sess_b = gen.new_session(pipeline_branch="pipeline2")
    monitor.post_sessions_v1(sess_a, sess_b)
    # pipeline1 gets large values (10100..20000); pipeline2 gets small ones (0..99).
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, sess_a, mem_usage=(step + 101) * 100))
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, sess_b, mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline2/head/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [99, 98, 97, 96, 95]
    # Results must all come from the pipeline2 session.
    for entry in payload["metrics"]:
        assert entry["session_h"] == sess_b["session_h"]
def test_list_tail_memory_on_pipeline(monitor, gen):
    """tail/5 filtered on pipeline2 must only see pipeline2's session metrics."""
    ctx = gen.new_context()
    sess_a = gen.new_session(pipeline_branch="pipeline1")
    sess_b = gen.new_session(pipeline_branch="pipeline2")
    monitor.post_sessions_v1(sess_a, sess_b)
    # pipeline1 gets large values (10100..20000); pipeline2 gets small ones (0..99).
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, sess_a, mem_usage=(step + 101) * 100))
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, sess_b, mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline2/tail/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [4, 3, 2, 1, 0]
    # Results must all come from the pipeline2 session.
    for entry in payload["metrics"]:
        assert entry["session_h"] == sess_b["session_h"]
def test_list_head_memory_of_build(monitor, gen):
    """head/5 filtered on build 2 must only see that build's session metrics."""
    ctx = gen.new_context()
    build_1 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="1")
    build_2 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="2")
    monitor.post_sessions_v1(build_1, build_2)
    # build 1 gets large values (10100..20000); build 2 gets small ones (0..99).
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, build_1, mem_usage=(step + 101) * 100))
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, build_2, mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline1/builds/2/head/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [99, 98, 97, 96, 95]
    # Results must all come from build 2's session.
    for entry in payload["metrics"]:
        assert entry["session_h"] == build_2["session_h"]
def test_list_tail_memory_of_build(monitor, gen):
    """tail/5 filtered on build 2 must only see that build's session metrics."""
    ctx = gen.new_context()
    build_1 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="1")
    build_2 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="2")
    monitor.post_sessions_v1(build_1, build_2)
    # build 1 gets large values (10100..20000); build 2 gets small ones (0..99).
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, build_1, mem_usage=(step + 101) * 100))
    for step in range(100):
        monitor.post_metrics_v1(gen.new_metric(ctx, build_2, mem_usage=step))
    payload = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline1/builds/2/tail/5/metrics').json
    assert 'metrics' in payload
    assert payload['metrics']
    observed = sorted((int(entry['mem_usage']) for entry in payload['metrics']),
                      reverse=True)
    assert observed == [4, 3, 2, 1, 0]
    # Results must all come from build 2's session.
    for entry in payload["metrics"]:
        assert entry["session_h"] == build_2["session_h"]
| 40.967033
| 103
| 0.652629
| 1,104
| 7,456
| 4.237319
| 0.09692
| 0.043608
| 0.017956
| 0.03292
| 0.951689
| 0.950834
| 0.941428
| 0.941428
| 0.941428
| 0.941428
| 0
| 0.059136
| 0.217543
| 7,456
| 181
| 104
| 41.19337
| 0.742715
| 0.097908
| 0
| 0.835616
| 1
| 0.013699
| 0.14584
| 0.066508
| 0
| 0
| 0
| 0
| 0.205479
| 1
| 0.054795
| false
| 0
| 0
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d1112c9bb742ed7f1238ff0c26540fb1c14b4d67
| 11,953
|
py
|
Python
|
wallgen.py
|
TheSpeedX/wallgen
|
a36a9d7a0ce6e93a06d254f1508d01d29c0f483b
|
[
"MIT"
] | 35
|
2019-09-13T09:11:49.000Z
|
2022-03-02T21:16:28.000Z
|
wallgen.py
|
TheSpeedX/wallgen
|
a36a9d7a0ce6e93a06d254f1508d01d29c0f483b
|
[
"MIT"
] | null | null | null |
wallgen.py
|
TheSpeedX/wallgen
|
a36a9d7a0ce6e93a06d254f1508d01d29c0f483b
|
[
"MIT"
] | 1
|
2020-03-15T03:16:53.000Z
|
2020-03-15T03:16:53.000Z
|
import sys
import time
import click
import numpy as np
from tools.gradient import *
from tools.shapes import *
from tools.wallpaper import *
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
    # Root click command group; subcommands attach via @cli.command() below.
    # Intentionally no docstring: click would surface it as help text and
    # change the CLI's --help output.
    pass
@cli.command()
@click.argument("side", type=click.INT)
@click.option("--colors", "-c", multiple=True, type=click.STRING, metavar="HEXCODE", help="Use many colors in a custom gradient")
@click.option("--points", "-p", default=100, metavar="no-of-points", help="Number of points to use, default = 100")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None, metavar="HEXCODE", help="Outline the triangles")
@click.option("--name", "-n", metavar="/path/to/output_file", help="Rename the output file")
@click.option("--only-color", "-oc", is_flag=True, help="Generate just a gradient image")
@click.option("--use-nn", "-un", is_flag=True, help="Use NbyNGradient function")
@click.option("--swirl", "-sw", is_flag=True, help="Swirl the gradient")
@click.option("--scale", "-sc", default=2, help="""Scale image to do anti-aliasing. Default=2. scale=1 means no antialiasing. [WARNING: Very memory expensive]""")
@click.option("--set-wall", "-w", is_flag=True, help="Set the generated image as your Desktop wallpaper")
def poly(side, points, show, colors, outline, name, only_color, use_nn, swirl, scale, set_wall):
    """ Generates a HQ low poly image using a gradient """
    # Reject inputs outside the supported ranges before doing any image work.
    error = ""
    if side < 50:
        error = "Image too small. Minimum size 50"
    elif points < 3:
        error = "Too less points. Minimum points 3"
    elif points > 200000:
        error = "Too many points. Maximum points 200000"
    elif scale < 1:
        error = "Invalid scale value"
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    side = side * scale  # render larger, then downsample at the end for anti-aliasing
    shift = side // 10
    nside = side + shift * 2  # pad the canvas to prevent underflow at the edges

    if colors:
        if len(colors) < 2:
            click.secho("One color gradient not possible.", fg="red", err=True)
            sys.exit(1)
        # "#RRGGBB" -> (r, g, b); the leading "#" is stripped before hex-decoding
        cs = [tuple(bytes.fromhex(c[1:])) for c in colors]
        img = nGradient(nside, *cs)
    else:
        if use_nn:
            # NbyNGradient needs a denser point cloud to look good
            points = 1000 if points < 1000 else points
            img = NbyNGradient(nside)
        else:
            img = random_gradient(nside)

    if swirl:
        img = swirl_image(img)

    if not only_color:
        if outline:
            try:
                outline = tuple(bytes.fromhex(outline[1:]))
            except ValueError:
                # bytes.fromhex raises ValueError on non-hex input; the
                # original broad `except Exception as e` was narrowed.
                click.secho("Invalid color hex", fg='red', err=True)
                sys.exit(1)
        print("Preparing image", end="")
        pts = genPoints(points, nside, nside)
        print("\r", end="")
        print("Generated points", end="")
        img = genPoly(side, side, img, pts, shift, shift, outl=outline)

    print("\r", end="")
    print("Making final tweaks", end="")
    # Downscale back to the requested size (undoes the anti-alias upscale).
    img = img.resize((side // scale, side // scale), resample=Image.BICUBIC)

    if show:
        img.show()

    if name:
        file_name = "{}.png".format(name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
    img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        setwallpaper(file_name)
@cli.command()
@click.argument("side", type=click.INT)
@click.option("--type", "-t", "shape", metavar="SHAPE", type=click.Choice(['square', 'hex', 'diamond', 'triangle', 'isometric']), help="Choose which shape to use")
@click.option("--colors", "-c", multiple=True, type=click.STRING, metavar="HEXCODE", help="Use many colors in a custom gradient")
@click.option("--percent", "-p", type=click.INT, metavar="1-10", default=1, help="Use this percentage to determine number of polygons. [1-10]")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None, metavar="HEXCODE", help="Outline the shapes")
@click.option("--name", "-n", metavar="/path/to/output_file", help="Rename the output file")
@click.option("--use-nn", "-un", is_flag=True, help="Use NbyNGradient function")
@click.option("--swirl", "-sw", is_flag=True, help="Swirl the gradient")
@click.option("--scale", "-sc", default=2, help="""Scale image to do anti-aliasing. Default=2. scale=1 means no antialiasing. [WARNING: Very memory expensive]""")
@click.option("--set-wall", "-w", is_flag=True, help="Set the generated image as your Desktop wallpaper")
def shape(side, shape, colors, show, outline, name, percent, use_nn, swirl, scale, set_wall):
    """ Generates a HQ image of a beautiful shapes """
    # Reject bad inputs up front. The scale check mirrors `poly`; without it,
    # scale=0 would crash later in the integer division (side // scale).
    error = ""
    if side < 50:
        error = "Image too small. Minimum size 50"
    elif scale < 1:
        error = "Invalid scale value"
    if percent is not None:
        if percent < 1 or percent > 10:
            error = "Error {} : Percent range 1-10".format(percent)
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    side = side * scale  # render larger, then downsample at the end for anti-aliasing

    if colors:
        if len(colors) < 2:
            click.secho("One color gradient not possible.", fg="red", err=True)
            sys.exit(1)
        # "#RRGGBB" -> (r, g, b); the leading "#" is stripped before hex-decoding
        cs = [tuple(bytes.fromhex(c[1:])) for c in colors]
        img = nGradient(side, *cs)
    else:
        img = NbyNGradient(side) if use_nn else random_gradient(side)

    if swirl:
        img = swirl_image(img)

    if outline:
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except ValueError:
            # bytes.fromhex raises ValueError on non-hex input; narrowed from
            # the original broad `except Exception as e`.
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)

    print("Preparing image", end="")
    # Dispatch on the requested shape; `per` falls back to 1 if percent is falsy.
    if shape == 'hex':
        percent = percent if percent else 5
        img = genHexagon(side, side, img, outline, per=(percent or 1))
    elif shape == 'square':
        img = genSquares(side, side, img, outline, per=(percent or 1))
    elif shape == 'diamond':
        img = genDiamond(side, side, img, outline, per=(percent or 1))
    elif shape == 'triangle':
        img = genTriangle(side, side, img, outline, per=(percent or 1))
    elif shape == 'isometric':
        img = genIsometric(side, side, img, outline, per=(percent or 1))
    else:
        error = "No shape given. To see list of shapes \"wallgen shape --help\""
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    print("\r", end="")
    print("Making final tweaks", end="")
    # Downscale back to the requested size (undoes the anti-alias upscale).
    img = img.resize((side // scale, side // scale), resample=Image.BICUBIC)

    if show:
        img.show()

    if name:
        file_name = "{}.png".format(name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
    img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        setwallpaper(file_name)
@cli.command()
@click.argument("side", type=click.INT)
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--name", "-n", help="Rename the output")
@click.option("--swirl", "-sw", is_flag=True, help="Swirl the image")
@click.option("--set-wall", "-w", is_flag=True, help="Set the generated image as your Desktop wallpaper")
def slants(side, show, name, swirl, set_wall):
    """ Generates slanting lines of various colors """
    # Fixed 2x supersampling: draw at double size, downscale afterwards.
    scale = 2
    side *= scale
    print("Preparing image", end="")
    img = drawSlants(side)
    print("\r", end="")
    print("Making final tweaks", end="")
    img = img.resize((side // scale, side // scale), resample=Image.BICUBIC)
    if swirl:
        img = swirl_image(img)
    if show:
        img.show()
    # Name the output after the user's choice, or timestamp it.
    file_name = "{}.png".format(name) if name else "wall-{}.png".format(int(time.time()))
    img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        setwallpaper(file_name)
@cli.group()
def pic():
    """ Use a picture instead of a gradient """
    # Container group only: the image-based subcommands (poly, shape)
    # register themselves via @pic.command() below.
    pass
@pic.command()
@click.argument("image", type=click.Path(exists=True, dir_okay=False))
@click.option("--points", "-p", default=1000, metavar="no-of-points", help="Number of points to use, default = 1000")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None, metavar="HEXCODE", help="Outline the triangles")
@click.option("--name", "-n", metavar="/path/to/output_file", help="Rename the output file")
@click.option("--smart", "-sm", is_flag=True, help="Use smart points")
@click.option("--set-wall", "-w", is_flag=True, help="Set the generated image as your Desktop wallpaper")
def poly(image, points, show, outline, name, smart, set_wall):
    """ Generates a HQ low poly image """
    # Validate the point count before touching the input image.
    if points < 3:
        error = "Too less points. Minimum points 3"
    elif points > 200000:
        error = "Too many points. Maximum points 200000"
    else:
        error = None
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    if outline:
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except ValueError:
            # bytes.fromhex raises ValueError on non-hex input; narrowed from
            # the original broad `except Exception as e`.
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)

    print("Preparing image", end="")
    img = Image.open(image)
    width = img.width
    height = img.height
    # Pad by 1% per side so generated points may spill past the borders.
    wshift = width // 100
    hshift = height // 100
    n_width = width + 2 * wshift
    n_height = height + 2 * hshift

    if smart:
        # Smart mode places points along Sobel edges of the grayscale image.
        ski_img = np.array(img)
        gray_img = color.rgb2gray(ski_img)
        pts = genSmartPoints(gray_img)
    else:
        pts = genPoints(points, n_width, n_height)

    print("\r", end="")
    print("Generated points", end="")
    final_img = genPoly(img.width, img.height, img, pts, wshift, hshift, outline, pic=True)
    print("\r", end="")
    print("Making final tweaks", end="")

    if show:
        final_img.show()

    if name:
        file_name = "{}.png".format(name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
    final_img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        setwallpaper(file_name)
@pic.command()
@click.argument("image", type=click.Path(exists=True, dir_okay=False))
@click.option("--type", "-t", "shape", type=click.Choice(['square', 'hex', 'diamond', 'triangle', 'isometric']), metavar="SHAPE", help="Choose which shape to use")
@click.option("--percent", "-p", type=click.INT, metavar="1-10", help="Use this percentage to determine number of polygons. [1-10]")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None, metavar="HEXCODE", help="Outline the shapes")
@click.option("--name", "-n", metavar="/path/to/output_file", help="Rename the output")
@click.option("--set-wall", "-w", is_flag=True, help="Set the generated image as your Desktop wallpaper")
def shape(image, shape, show, outline, name, percent, set_wall):
    """ Generate a HQ image of a beautiful shapes """
    # Validate percent when supplied. Checking `is not None` (instead of the
    # original truthiness test) also rejects percent=0, which previously
    # slipped through and reached the generators as per=0.
    error = None
    if percent is not None:
        if percent < 1 or percent > 10:
            error = "Percent range 1-10"
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    img = Image.open(image)
    width = img.width
    height = img.height

    if outline:
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except ValueError:
            # bytes.fromhex raises ValueError on non-hex input; narrowed from
            # the original broad `except Exception as e`.
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)

    print("Preparing image", end="")
    # NOTE(review): for non-hex shapes `per` may be None when --percent is
    # omitted; presumably the generators apply their own default -- confirm.
    if shape == 'hex':
        percent = percent if percent else 5
        img = genHexagon(width, height, img, outline, pic=True, per=percent)
    elif shape == 'square':
        img = genSquares(width, height, img, outline, pic=True, per=percent)
    elif shape == 'diamond':
        img = genDiamond(width, height, img, outline, pic=True, per=percent)
    elif shape == 'triangle':
        img = genTriangle(width, height, img, outline, pic=True, per=percent)
    elif shape == 'isometric':
        img = genIsometric(width, height, img, outline, pic=True, per=percent)
    else:
        error = "No shape given. To see list of shapes \"wallgen pic shape --help\""
        click.secho(error, fg='red', err=True)
        sys.exit(1)

    print("\r", end="")
    print("Making final tweaks", end="")

    if show:
        img.show()

    if name:
        file_name = "{}.png".format(name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
    img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        setwallpaper(file_name)
if __name__ == "__main__":
cli()
| 30.108312
| 163
| 0.667698
| 1,794
| 11,953
| 4.390747
| 0.122631
| 0.050273
| 0.021582
| 0.030215
| 0.831281
| 0.789895
| 0.787483
| 0.763996
| 0.732005
| 0.709915
| 0
| 0.01341
| 0.15151
| 11,953
| 396
| 164
| 30.184343
| 0.763262
| 0.038568
| 0
| 0.744186
| 0
| 0.006645
| 0.248886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0.006645
| 0.023256
| 0
| 0.046512
| 0.096346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d1183e6a65d183987c1254e741776022055bf2e4
| 6,791
|
py
|
Python
|
Test/plot_test.py
|
afcarl/pycm
|
f714d0ba851c07e1f23bd02bb870334f9da066dc
|
[
"MIT"
] | 1
|
2019-04-22T06:04:52.000Z
|
2019-04-22T06:04:52.000Z
|
Test/plot_test.py
|
afcarl/pycm
|
f714d0ba851c07e1f23bd02bb870334f9da066dc
|
[
"MIT"
] | null | null | null |
Test/plot_test.py
|
afcarl/pycm
|
f714d0ba851c07e1f23bd02bb870334f9da066dc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
>>> from pycm import *
>>> from matplotlib import pyplot as plt
>>> import seaborn as sns
>>> y_act = [0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2]
>>> y_pre = [0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,1,1,1,1,1,2,0,1,2,2,2,2]
>>> cm = ConfusionMatrix(y_act,y_pre)
>>> ax = cm.plot()
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xlabel()
'Predicted Classes'
>>> ax.get_ylabel()
'Actual Classes'
>>> ax.get_xticks()
array([0, 1, 2])
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticks()
array([0, 1, 2])
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True)
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(one_vs_all=True)
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(one_vs_all=True, class_name=0)
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '~')]
>>> ax.texts
[]
>>> ax = cm.plot(title="test")
>>> ax.get_title()
'test'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(number_label=True)
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[Text(0, 0, '9'), Text(1, 0, '3'), Text(2, 0, '0'), Text(0, 1, '3'), Text(1, 1, '5'), Text(2, 1, '1'), Text(0, 2, '1'), Text(1, 2, '1'), Text(2, 2, '4')]
>>> ax = cm.plot(cmap=plt.cm.Blues)
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True, one_vs_all=True, class_name=1)
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0, 0, '1'), Text(1, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0, '1'), Text(0, 1, '~')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True, number_label=True)
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0, 0, '0'), Text(1, 0, '1'), Text(2, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0, '0'), Text(0, 1, '1'), Text(0, 2, '2')]
>>> ax.texts
[Text(0, 0, '0.75'), Text(1, 0, '0.25'), Text(2, 0, '0.0'), Text(0, 1, '0.33333'), Text(1, 1, '0.55556'), Text(2, 1, '0.11111'), Text(0, 2, '0.16667'), Text(1, 2, '0.16667'), Text(2, 2, '0.66667')]
>>> ax = cm.plot(normalized=True, one_vs_all=True, class_name=1, number_label=True)
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0, 0, '1'), Text(1, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0, '1'), Text(0, 1, '~')]
>>> ax.texts
[Text(0, 0, '0.55556'), Text(1, 0, '0.44444'), Text(0, 1, '0.22222'), Text(1, 1, '0.77778')]
>>> ax = cm.plot(plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xlabel()
'Predicted Classes'
>>> ax.get_ylabel()
'Actual Classes'
>>> ax.get_xticks()
array([0.5, 1.5, 2.5])
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticks()
array([0.5, 1.5, 2.5])
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(one_vs_all=True, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(one_vs_all=True, class_name=0, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '~')]
>>> ax.texts
[]
>>> ax = cm.plot(title="test", plot_lib='seaborn')
>>> ax.get_title()
'test'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(number_label=True, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[Text(0.5, 0.5, '9'), Text(1.5, 0.5, '3'), Text(2.5, 0.5, '0'), Text(0.5, 1.5, '3'), Text(1.5, 1.5, '5'), Text(2.5, 1.5, '1'), Text(0.5, 2.5, '1'), Text(1.5, 2.5, '1'), Text(2.5, 2.5, '4')]
>>> ax = cm.plot(cmap=plt.cm.Blues, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True, one_vs_all=True, class_name=1, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0.5, 0, '1'), Text(1.5, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '1'), Text(0, 1.5, '~')]
>>> ax.texts
[]
>>> ax = cm.plot(normalized=True, number_label=True, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0.5, 0, '0'), Text(1.5, 0, '1'), Text(2.5, 0, '2')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '0'), Text(0, 1.5, '1'), Text(0, 2.5, '2')]
>>> ax.texts
[Text(0.5, 0.5, '0.75'), Text(1.5, 0.5, '0.25'), Text(2.5, 0.5, '0.0'), Text(0.5, 1.5, '0.33333'), Text(1.5, 1.5, '0.55556'), Text(2.5, 1.5, '0.11111'), Text(0.5, 2.5, '0.16667'), Text(1.5, 2.5, '0.16667'), Text(2.5, 2.5, '0.66667')]
>>> ax = cm.plot(normalized=True, one_vs_all=True, class_name=1, number_label=True, plot_lib='seaborn')
>>> ax.get_title()
'Confusion Matrix (Normalized)'
>>> ax.get_xticklabels()
[Text(0.5, 0, '1'), Text(1.5, 0, '~')]
>>> ax.get_yticklabels()
[Text(0, 0.5, '1'), Text(0, 1.5, '~')]
>>> ax.texts
[Text(0.5, 0.5, '0.55556'), Text(1.5, 0.5, '0.44444'), Text(0.5, 1.5, '0.22222'), Text(1.5, 1.5, '0.77778')]
"""
| 33.126829
| 233
| 0.553085
| 1,306
| 6,791
| 2.79173
| 0.04977
| 0.123423
| 0.030444
| 0.109709
| 0.911135
| 0.88508
| 0.845858
| 0.835985
| 0.812671
| 0.793472
| 0
| 0.123043
| 0.125166
| 6,791
| 205
| 234
| 33.126829
| 0.490658
| 0.998675
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d1371949e6884802833824e8a3c8dd268160c833
| 117,237
|
py
|
Python
|
h1/api/storage_project_disk_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/storage_project_disk_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/storage_project_disk_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.disk import Disk
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.metric import Metric
from h1.model.point import Point
from h1.model.resource_service import ResourceService
from h1.model.storage_project_disk_create import StorageProjectDiskCreate
from h1.model.storage_project_disk_resize import StorageProjectDiskResize
from h1.model.storage_project_disk_transfer import StorageProjectDiskTransfer
from h1.model.storage_project_disk_update import StorageProjectDiskUpdate
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
class StorageProjectDiskApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __storage_project_disk_create(
self,
project_id,
location_id,
storage_project_disk_create,
**kwargs
):
"""Create storage/disk # noqa: E501
Create disk # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.storage_project_disk_create(project_id, location_id, storage_project_disk_create, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
storage_project_disk_create (StorageProjectDiskCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Disk
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['storage_project_disk_create'] = \
storage_project_disk_create
return self.call_with_http_info(**kwargs)
self.storage_project_disk_create = _Endpoint(
settings={
'response_type': (Disk,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk',
'operation_id': 'storage_project_disk_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'storage_project_disk_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'storage_project_disk_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'storage_project_disk_create':
(StorageProjectDiskCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'storage_project_disk_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__storage_project_disk_create
)
def __storage_project_disk_delete(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """Delete storage/disk.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_delete(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: DELETE /storage/{locationId}/project/{projectId}/disk/{diskId}
self.storage_project_disk_delete = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}',
        'operation_id': 'storage_project_disk_delete',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id'],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_delete
)
def __storage_project_disk_detach(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """Detach storage/disk (action detach).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_detach(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        x_idempotency_key (str): Idempotency key. [optional]
        x_dry_run (str): Dry run. [optional]
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        Disk, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: POST .../disk/{diskId}/actions/detach
self.storage_project_disk_detach = _Endpoint(
    settings={
        'response_type': (Disk,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/actions/detach',
        'operation_id': 'storage_project_disk_detach',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'project_id',
            'location_id',
            'disk_id',
            'x_idempotency_key',
            'x_dry_run',
        ],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'x_idempotency_key': (str,),
            'x_dry_run': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'x_idempotency_key': 'x-idempotency-key',
            'x_dry_run': 'x-dry-run',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'x_idempotency_key': 'header',
            'x_dry_run': 'header',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_detach
)
def __storage_project_disk_download(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """Download storage/disk (action download).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_download(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        x_idempotency_key (str): Idempotency key. [optional]
        x_dry_run (str): Dry run. [optional]
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: POST .../disk/{diskId}/actions/download
self.storage_project_disk_download = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/actions/download',
        'operation_id': 'storage_project_disk_download',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'project_id',
            'location_id',
            'disk_id',
            'x_idempotency_key',
            'x_dry_run',
        ],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'x_idempotency_key': (str,),
            'x_dry_run': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'x_idempotency_key': 'x-idempotency-key',
            'x_dry_run': 'x-dry-run',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'x_idempotency_key': 'header',
            'x_dry_run': 'header',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_download
)
def __storage_project_disk_event_get(
    self,
    project_id,
    location_id,
    disk_id,
    event_id,
    **kwargs
):
    """Get storage/disk.event.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_event_get(project_id, location_id, disk_id, event_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        event_id (str): eventId

    Keyword Args:
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        Event, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['event_id'] = event_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}/event/{eventId}
self.storage_project_disk_event_get = _Endpoint(
    settings={
        'response_type': (Event,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/event/{eventId}',
        'operation_id': 'storage_project_disk_event_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id', 'event_id'],
        'required': ['project_id', 'location_id', 'disk_id', 'event_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'event_id': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'event_id': 'eventId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'event_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_event_get
)
def __storage_project_disk_event_list(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """List storage/disk.event.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_event_list(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        limit (float): $limit. [optional] if omitted the server will use the default value of 100
        skip (float): $skip. [optional]
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        [Event], or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}/event (paged via $limit/$skip)
self.storage_project_disk_event_list = _Endpoint(
    settings={
        'response_type': ([Event],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/event',
        'operation_id': 'storage_project_disk_event_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id', 'limit', 'skip'],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': ['limit'],
    },
    root_map={
        'validations': {
            # server accepts 1 <= $limit <= 1000
            ('limit',): {
                'inclusive_maximum': 1000,
                'inclusive_minimum': 1,
            },
        },
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'limit': (float,),
            'skip': (float,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'limit': '$limit',
            'skip': '$skip',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'limit': 'query',
            'skip': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_event_list
)
def __storage_project_disk_get(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """Get storage/disk (returns a single disk).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_get(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        Disk, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}
self.storage_project_disk_get = _Endpoint(
    settings={
        'response_type': (Disk,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}',
        'operation_id': 'storage_project_disk_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id'],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_get
)
def __storage_project_disk_list(
    self,
    project_id,
    location_id,
    **kwargs
):
    """List storage/disk.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_list(project_id, location_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id

    Keyword Args:
        name (str): Filter by name. [optional]
        vm (str): Filter by vm. [optional]
        tag_value (str): Filter by tag.value. [optional]
        tag_key (str): Filter by tag.key. [optional]
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        [Disk], or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk (optional query filters)
self.storage_project_disk_list = _Endpoint(
    settings={
        'response_type': ([Disk],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk',
        'operation_id': 'storage_project_disk_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'project_id',
            'location_id',
            'name',
            'vm',
            'tag_value',
            'tag_key',
        ],
        'required': ['project_id', 'location_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'name': (str,),
            'vm': (str,),
            'tag_value': (str,),
            'tag_key': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'name': 'name',
            'vm': 'vm',
            'tag_value': 'tag.value',
            'tag_key': 'tag.key',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'name': 'query',
            'vm': 'query',
            'tag_value': 'query',
            'tag_key': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_list
)
def __storage_project_disk_metric_get(
    self,
    project_id,
    location_id,
    disk_id,
    metric_id,
    **kwargs
):
    """Get storage/disk.metric.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_metric_get(project_id, location_id, disk_id, metric_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        metric_id (str): metricId

    Keyword Args:
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        Metric, or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['metric_id'] = metric_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}/metric/{metricId}
self.storage_project_disk_metric_get = _Endpoint(
    settings={
        'response_type': (Metric,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/metric/{metricId}',
        'operation_id': 'storage_project_disk_metric_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id', 'metric_id'],
        'required': ['project_id', 'location_id', 'disk_id', 'metric_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'metric_id': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'metric_id': 'metricId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'metric_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_metric_get
)
def __storage_project_disk_metric_list(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """List storage/disk.metric.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_metric_list(project_id, location_id, disk_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        [Metric], or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}/metric
self.storage_project_disk_metric_list = _Endpoint(
    settings={
        'response_type': ([Metric],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/metric',
        'operation_id': 'storage_project_disk_metric_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['project_id', 'location_id', 'disk_id'],
        'required': ['project_id', 'location_id', 'disk_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_metric_list
)
def __storage_project_disk_metric_point_list(
    self,
    project_id,
    location_id,
    disk_id,
    metric_id,
    **kwargs
):
    """List storage/disk.point.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result.

    >>> thread = api.storage_project_disk_metric_point_list(project_id, location_id, disk_id, metric_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        metric_id (str): metricId

    Keyword Args:
        interval (str): interval. [optional]
        timespan (str): timespan. [optional]
        _return_http_data_only (bool): body only, no status/headers. Default True.
        _preload_content (bool): if False, return raw urllib3.HTTPResponse. Default True.
        _request_timeout (float/tuple): total timeout or (connect, read) pair. Default None.
        _check_input_type (bool): type-check request data. Default True.
        _check_return_type (bool): type-check response data. Default True.
        _host_index (int/None): server index; defaults to configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        [Point], or the request thread when called asynchronously.
    """
    # Apply framework defaults only where the caller gave no value.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['metric_id'] = metric_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor: GET .../disk/{diskId}/metric/{metricId}/point
self.storage_project_disk_metric_point_list = _Endpoint(
    settings={
        'response_type': ([Point],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/metric/{metricId}/point',
        'operation_id': 'storage_project_disk_metric_point_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'project_id',
            'location_id',
            'disk_id',
            'metric_id',
            'interval',
            'timespan',
        ],
        'required': ['project_id', 'location_id', 'disk_id', 'metric_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'project_id': (str,),
            'location_id': (str,),
            'disk_id': (str,),
            'metric_id': (str,),
            'interval': (str,),
            'timespan': (str,),
        },
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
            'metric_id': 'metricId',
            'interval': 'interval',
            'timespan': 'timespan',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'metric_id': 'path',
            'interval': 'query',
            'timespan': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__storage_project_disk_metric_point_list
)
def __storage_project_disk_resize(
    self,
    project_id,
    location_id,
    disk_id,
    storage_project_disk_resize,
    **kwargs
):
    """Resize storage/disk  # noqa: E501

    action resize  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        storage_project_disk_resize (StorageProjectDiskResize):

    Keyword Args:
        x_idempotency_key (str): Idempotency key. [optional]
        x_dry_run (str): Dry run. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Disk, or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['storage_project_disk_resize'] = storage_project_disk_resize
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for POST .../disk/{diskId}/actions/resize: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_resize = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': (Disk,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/actions/resize',
'operation_id': 'storage_project_disk_resize',
'http_method': 'POST',
'servers': None,
},
# Parameter inventory: all accepted names and the required subset.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'storage_project_disk_resize',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'disk_id',
'storage_project_disk_resize',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location
# (path / body / header) used when building the HTTP call.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'storage_project_disk_resize':
(StorageProjectDiskResize,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'storage_project_disk_resize': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
# Content negotiation: JSON in, JSON out.
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__storage_project_disk_resize
)
def __storage_project_disk_service_get(
    self,
    project_id,
    location_id,
    disk_id,
    service_id,
    **kwargs
):
    """Get storage/disk.service  # noqa: E501

    Get storage/disk.service  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        service_id (str): serviceId

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        ResourceService, or the request thread when called
        asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['service_id'] = service_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET .../disk/{diskId}/service/{serviceId}:
# binds the wire-level metadata below to the private callable above.
self.storage_project_disk_service_get = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/service/{serviceId}',
'operation_id': 'storage_project_disk_service_get',
'http_method': 'GET',
'servers': None,
},
# Parameter inventory: every parameter is a required path segment.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'service_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'service_id': 'serviceId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
# Content negotiation: JSON responses, no request body.
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_service_get
)
def __storage_project_disk_service_list(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """List storage/disk.service  # noqa: E501

    List storage/disk.service  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [ResourceService], or the request thread when called
        asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET .../disk/{diskId}/service: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_service_list = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/service',
'operation_id': 'storage_project_disk_service_list',
'http_method': 'GET',
'servers': None,
},
# Parameter inventory: every parameter is a required path segment.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
},
'collection_format_map': {
}
},
# Content negotiation: JSON responses, no request body.
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_service_list
)
def __storage_project_disk_tag_create(
    self,
    project_id,
    location_id,
    disk_id,
    tag,
    **kwargs
):
    """Create storage/disk.tag  # noqa: E501

    Create storage/disk.tag  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        tag (Tag):

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Tag, or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['tag'] = tag
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for POST .../disk/{diskId}/tag: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_tag_create = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/tag',
'operation_id': 'storage_project_disk_tag_create',
'http_method': 'POST',
'servers': None,
},
# Parameter inventory: path ids plus the required request body.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'tag',
],
'required': [
'project_id',
'location_id',
'disk_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
# 'tag' is the body (see location_map) so it needs no wire-name entry.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
# Content negotiation: JSON in, JSON out.
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__storage_project_disk_tag_create
)
def __storage_project_disk_tag_delete(
    self,
    project_id,
    location_id,
    disk_id,
    tag_id,
    **kwargs
):
    """Delete storage/disk.tag  # noqa: E501

    Delete storage/disk.tag  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        tag_id (str): tagId

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        None, or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['tag_id'] = tag_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for DELETE .../disk/{diskId}/tag/{tagId}: binds the
# wire-level metadata below to the private callable defined above.
# response_type None: a successful delete deserializes to nothing.
self.storage_project_disk_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/tag/{tagId}',
'operation_id': 'storage_project_disk_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
# Parameter inventory: every parameter is a required path segment.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
# Content negotiation: JSON responses, no request body.
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_tag_delete
)
def __storage_project_disk_tag_get(
    self,
    project_id,
    location_id,
    disk_id,
    tag_id,
    **kwargs
):
    """Get storage/disk.tag  # noqa: E501

    Get storage/disk.tag  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        tag_id (str): tagId

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Tag, or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['tag_id'] = tag_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET .../disk/{diskId}/tag/{tagId}: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_tag_get = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/tag/{tagId}',
'operation_id': 'storage_project_disk_tag_get',
'http_method': 'GET',
'servers': None,
},
# Parameter inventory: every parameter is a required path segment.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
# Content negotiation: JSON responses, no request body.
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_tag_get
)
def __storage_project_disk_tag_list(
    self,
    project_id,
    location_id,
    disk_id,
    **kwargs
):
    """List storage/disk.tag  # noqa: E501

    List storage/disk.tag  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Tag], or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET .../disk/{diskId}/tag: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_tag_list = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/tag',
'operation_id': 'storage_project_disk_tag_list',
'http_method': 'GET',
'servers': None,
},
# Parameter inventory: every parameter is a required path segment.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
},
'collection_format_map': {
}
},
# Content negotiation: JSON responses, no request body.
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_tag_list
)
def __storage_project_disk_tag_put(
    self,
    project_id,
    location_id,
    disk_id,
    tag_array,
    **kwargs
):
    """Replace storage/disk.tag  # noqa: E501

    Replace storage/disk.tag  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        tag_array (TagArray):

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Tag], or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['tag_array'] = tag_array
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for PUT .../disk/{diskId}/tag: binds the
# wire-level metadata below to the private callable defined above.
self.storage_project_disk_tag_put = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/tag',
'operation_id': 'storage_project_disk_tag_put',
'http_method': 'PUT',
'servers': None,
},
# Parameter inventory: path ids plus the required request body.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'tag_array',
],
'required': [
'project_id',
'location_id',
'disk_id',
'tag_array',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location.
# 'tag_array' is the body (see location_map) so it has no wire-name entry.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'tag_array':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'tag_array': 'body',
},
'collection_format_map': {
}
},
# Content negotiation: JSON in, JSON out.
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__storage_project_disk_tag_put
)
def __storage_project_disk_transfer(
    self,
    project_id,
    location_id,
    disk_id,
    storage_project_disk_transfer,
    **kwargs
):
    """Transfer storage/disk  # noqa: E501

    action transfer  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (``thread.get()`` yields the result).

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        storage_project_disk_transfer (StorageProjectDiskTransfer):

    Keyword Args:
        x_idempotency_key (str): Idempotency key. [optional]
        x_dry_run (str): Dry run. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse
            object is returned without reading/decoding its data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default is None.
        _check_input_type (bool): type-check the data sent to the
            server. Default is True.
        _check_return_type (bool): type-check the data received from
            the server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Disk, or the request thread when called asynchronously.
    """
    # Apply transport defaults only where the caller did not choose.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs[option] = kwargs.get(option, default)
    # Positional arguments travel through kwargs to the endpoint machinery.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['storage_project_disk_transfer'] = storage_project_disk_transfer
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for POST .../disk/{diskId}/actions/transfer: binds
# the wire-level metadata below to the private callable defined above.
self.storage_project_disk_transfer = _Endpoint(
# Response deserialization target, auth scheme, path template and verb.
settings={
'response_type': (Disk,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/actions/transfer',
'operation_id': 'storage_project_disk_transfer',
'http_method': 'POST',
'servers': None,
},
# Parameter inventory: path ids, required body, optional header params.
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'storage_project_disk_transfer',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'disk_id',
'storage_project_disk_transfer',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
# Per-parameter Python type, wire name and request location
# (path / body / header) used when building the HTTP call.
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'storage_project_disk_transfer':
(StorageProjectDiskTransfer,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'storage_project_disk_transfer': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
# Content negotiation: JSON in, JSON out.
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__storage_project_disk_transfer
)
def __storage_project_disk_update(
    self,
    project_id,
    location_id,
    disk_id,
    storage_project_disk_update,
    **kwargs
):
    """Update storage/disk  # noqa: E501

    Returns the modified disk.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.storage_project_disk_update(project_id, location_id, disk_id, storage_project_disk_update, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        disk_id (str): Disk Id
        storage_project_disk_update (StorageProjectDiskUpdate):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request.
            A single number is the total timeout; a (connection, read)
            tuple sets both individually. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Disk
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    framework_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
    )
    for option, default in framework_defaults:
        kwargs.setdefault(option, default)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Positional arguments are forwarded to the endpoint machinery by name.
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['disk_id'] = disk_id
    kwargs['storage_project_disk_update'] = storage_project_disk_update
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for PATCH .../disk/{diskId} (partial disk update).
# The generated __storage_project_disk_update closure above is the callable.
self.storage_project_disk_update = _Endpoint(
    settings={
        # Successful responses are deserialised into a Disk model.
        'response_type': (Disk,),
        'auth': [
            'BearerAuth'
        ],
        'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}',
        'operation_id': 'storage_project_disk_update',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        # Unlike the transfer action, this endpoint takes no header params.
        'all': [
            'project_id',
            'location_id',
            'disk_id',
            'storage_project_disk_update',
        ],
        'required': [
            'project_id',
            'location_id',
            'disk_id',
            'storage_project_disk_update',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        # Python-side types used for input checking / serialisation.
        'openapi_types': {
            'project_id':
                (str,),
            'location_id':
                (str,),
            'disk_id':
                (str,),
            'storage_project_disk_update':
                (StorageProjectDiskUpdate,),
        },
        # python_name -> camelCase path-parameter name.
        'attribute_map': {
            'project_id': 'projectId',
            'location_id': 'locationId',
            'disk_id': 'diskId',
        },
        'location_map': {
            'project_id': 'path',
            'location_id': 'path',
            'disk_id': 'path',
            'storage_project_disk_update': 'body',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json'
        ]
    },
    api_client=api_client,
    callable=__storage_project_disk_update
)
| 36.774467
| 139
| 0.441814
| 9,815
| 117,237
| 4.978706
| 0.023128
| 0.042545
| 0.053411
| 0.040826
| 0.952666
| 0.929747
| 0.91049
| 0.909283
| 0.893791
| 0.892707
| 0
| 0.003074
| 0.47282
| 117,237
| 3,187
| 140
| 36.786006
| 0.787574
| 0.286906
| 0
| 0.733717
| 1
| 0
| 0.235673
| 0.051887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009747
| false
| 0
| 0.007089
| 0
| 0.026584
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d16552c0a9535e1c0bd7f701987301681832eba5
| 142
|
py
|
Python
|
fastapi/requests.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 53,007
|
2018-12-08T10:05:29.000Z
|
2022-03-31T23:30:02.000Z
|
fastapi/requests.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,155
|
2019-01-05T05:07:49.000Z
|
2022-03-31T21:25:38.000Z
|
fastapi/requests.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,092
|
2018-12-09T16:21:00.000Z
|
2022-03-31T07:59:45.000Z
|
from starlette.requests import HTTPConnection as HTTPConnection # noqa: F401
from starlette.requests import Request as Request # noqa: F401
| 47.333333
| 77
| 0.816901
| 18
| 142
| 6.444444
| 0.5
| 0.224138
| 0.362069
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04918
| 0.140845
| 142
| 2
| 78
| 71
| 0.901639
| 0.147887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0f3e70475df360798f7a05134870ab10e9ae2752
| 17,822
|
py
|
Python
|
tests/regression_tests.py
|
gf712/PyML
|
83849ff6e1c1bbcf9b87826ef673fb34622a0539
|
[
"MIT"
] | 4
|
2018-11-23T07:45:51.000Z
|
2020-12-29T19:32:06.000Z
|
tests/regression_tests.py
|
gf712/PyML
|
83849ff6e1c1bbcf9b87826ef673fb34622a0539
|
[
"MIT"
] | 18
|
2017-11-16T08:32:42.000Z
|
2017-12-27T15:19:58.000Z
|
tests/regression_tests.py
|
gf712/PyML
|
83849ff6e1c1bbcf9b87826ef673fb34622a0539
|
[
"MIT"
] | 3
|
2018-10-31T17:09:22.000Z
|
2020-03-02T22:40:23.000Z
|
import unittest
from pyml.linear_models import LinearRegression, LogisticRegression
from pyml.linear_models.base import LinearBase
from pyml.datasets import regression, gaussian
from pyml.preprocessing import train_test_split
class LinearRegressionGradientDescentTest(unittest.TestCase):
    """Pin LinearRegression (gradient-descent solver) on a seeded dataset."""

    @classmethod
    def setUpClass(cls):
        # Deterministic data and split so the literal expectations hold.
        cls.X, cls.y = regression(100, seed=1970)
        split = train_test_split(cls.X, cls.y, train_split=0.8, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = split
        cls.model = LinearRegression(seed=1970, solver='gradient_descent')
        cls.model.train(X=cls.X_train, y=cls.y_train)

    def test_LinR_iterations(self):
        self.assertEqual(self.model.iterations, 7)

    def test_LinR_coefficients(self):
        self.assertAlmostEqual(self.model.coefficients[0],
                               0.4907136205265401, delta=0.001)
        self.assertAlmostEqual(self.model.coefficients[1],
                               0.9034467828351432, delta=0.001)

    def test_LinR_cost(self):
        self.assertAlmostEqual(self.model.cost[0],
                               3.5181936893597365, delta=0.001)
        self.assertAlmostEqual(self.model.cost[-1],
                               0.4868770157376261, delta=0.001)

    def test_LinR_predict(self):
        first_prediction = self.model.predict(self.X_test)[0]
        self.assertAlmostEqual(first_prediction, 3.8176098320897065, delta=0.001)

    def test_LinR_train_predict(self):
        first_prediction = self.model.train_predict(self.X_train, self.y_train)[0]
        self.assertAlmostEqual(first_prediction, 9.770956237446251, delta=0.001)

    def test_LinR_mse(self):
        self.assertAlmostEqual(self.model.score(self.X_test, self.y_test),
                               1.3280324597827904, delta=0.001)

    def test_LinR_mae(self):
        mae = self.model.score(self.X_test, self.y_test, scorer='mae')
        self.assertAlmostEqual(mae, 0.9126392424298799, delta=0.001)

    def test_LinR_seed(self):
        self.assertEqual(self.model.seed, 1970)

    def test_LinR_solver_error(self):
        # An unknown solver name must be rejected at construction time.
        self.assertRaises(ValueError, LinearRegression, 1970, True,
                          'unknown_solver')
class GradientDescentTest(unittest.TestCase):
    """Validates optimiser-name checking in LinearBase."""

    def test_GD_opt_InitError(self):
        # An unrecognised optimiser name must raise at construction.
        ctor_args = (0.01, 0.01, 10, 0.9, 64, 0.1,
                     'amazing_optimiser_algo', None, 'regressor')
        self.assertRaises(ValueError, LinearBase, *ctor_args)
class LinearRegressionOLSTest(unittest.TestCase):
    """Pin LinearRegression (ordinary-least-squares solver) outputs."""

    @classmethod
    def setUpClass(cls):
        # Same seeded dataset as the gradient-descent suite.
        cls.X, cls.y = regression(100, seed=1970)
        split = train_test_split(cls.X, cls.y, train_split=0.8, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = split
        cls.model = LinearRegression(seed=1970, solver='OLS')
        cls.model.train(X=cls.X_train, y=cls.y_train)

    def test_OLS_coefficients(self):
        self.assertAlmostEqual(self.model.coefficients[0],
                               0.518888884839874, delta=0.001)
        self.assertAlmostEqual(self.model.coefficients[1],
                               0.9128356664164721, delta=0.001)

    def test_OLS_predict(self):
        first_prediction = self.model.predict(self.X_test)[0]
        self.assertAlmostEqual(first_prediction, 3.880359176261411, delta=0.001)

    def test_OLS_mse(self):
        self.assertAlmostEqual(self.model.score(self.X_test, self.y_test),
                               1.34151578011058, delta=0.001)
class LogisticRegressionTest(unittest.TestCase):
    """Pin LogisticRegression outputs on a seeded two-class gaussian set."""

    @classmethod
    def setUpClass(cls):
        # Fixed seed so the literal expectations below are reproducible.
        # (Dropped the redundant chained `X, y = ...` target from the
        # original, which only created unused locals.)
        cls.X, cls.y = gaussian(labels=2, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_LogR_iterations(self):
        self.assertEqual(self.classifier.iterations, 1623)

    def test_LogR_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0],
                               -1.1576475345638408, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1],
                               0.1437129269620468, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2],
                               2.4464052394504856, delta=0.001)

    def test_LogR_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0],
                               -106.11158912690777, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[-1],
                               -61.15035744417768, delta=0.001)

    def test_LogR_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_LogR_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0],
                               0.807766417948826, delta=0.001)

    def test_LogR_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.975, delta=0.001)

    def test_LogR_seed(self):
        self.assertEqual(self.classifier.seed, 1970)
class MultiClassLogisticRegressionTest(unittest.TestCase):
    """Pin one-vs-rest LogisticRegression on a seeded three-class set."""

    @classmethod
    def setUpClass(cls):
        # Dropped the redundant chained `X, y = ...` target (unused locals).
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_MLogR_iterations(self):
        # One iteration count per one-vs-rest classifier.
        self.assertEqual(self.classifier.iterations[0], 3829)
        self.assertEqual(self.classifier.iterations[1], 4778)
        self.assertEqual(self.classifier.iterations[2], 3400)

    def test_MLogR_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -2.504659172303325, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               0.9999686753579901, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               0.5430990877594853, delta=0.001)

    def test_MLogR_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -79.28190020206335, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -110.82234100438215, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -77.51659078552537, delta=0.001)

    def test_MLogR_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogR_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.18176321188156466, delta=0.001)

    def test_MLogR_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9666666666666667, delta=0.001)

    def test_MLogR_seed(self):
        self.assertEqual(self.classifier.seed, 1970)
class MultiClassLogisticRegressionwithMomentumTest(unittest.TestCase):
    """Pin multiclass LogisticRegression trained with momentum (alpha=0.9)."""

    @classmethod
    def setUpClass(cls):
        # Dropped the redundant chained `X, y = ...` target (unused locals).
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_MLogRMom_iterations(self):
        # Momentum converges far faster than plain gradient descent.
        self.assertEqual(self.classifier.iterations[0], 1671)
        self.assertEqual(self.classifier.iterations[1], 1691)
        self.assertEqual(self.classifier.iterations[2], 1546)

    def test_MLogRMom_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -6.179813361986948, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               3.915365241814121, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               1.4391187417309603, delta=0.001)

    def test_MLogRMom_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -38.80552082812185, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -71.11678230563942, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -39.44585417456268, delta=0.001)

    def test_MLogRMom_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRMom_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.04680865754859053, delta=0.001)

    def test_MLogRMom_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionMiniBatch(unittest.TestCase):
    """Pin multiclass LogisticRegression with mini-batch training."""

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # Fixed the stray `cls .X` (space before attribute) typo.
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9,
                                            batch_size=64)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_MLogRMin_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 905)
        self.assertEqual(self.classifier.iterations[1], 789)
        self.assertEqual(self.classifier.iterations[2], 841)

    def test_MLogRMin_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -8.55590366737834, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               5.300981091494979, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               1.9547910473910273, delta=0.001)

    def test_MLogRMin_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -28.947086360092676, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -63.442959967464574, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -30.33741258448764, delta=0.001)

    def test_MLogRMin_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRMin_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.017049079187284634, delta=0.001)

    def test_MLogRMin_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionNesterovOpt(unittest.TestCase):
    """Pin multiclass LogisticRegression with the Nesterov optimiser."""

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # Fixed the stray `cls .X` (space before attribute) typo.
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9,
                                            method='nesterov')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_MLogRNesOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 1673)
        self.assertEqual(self.classifier.iterations[1], 1692)
        self.assertEqual(self.classifier.iterations[2], 1548)

    def test_MLogRNesOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -6.180431021808369, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               3.914247513063426, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               1.4391579779616654, delta=0.001)

    def test_MLogRNesOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -38.80233474838201, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -71.1246527942097, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -39.44375056521835, delta=0.001)

    def test_MLogRNesOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRNesOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.046812477405301665, delta=0.001)

    def test_MLogRNesOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionAdagradOpt(unittest.TestCase):
    """Pin multiclass LogisticRegression with the Adagrad optimiser."""

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # Fixed the stray `cls .X` (space before attribute) typo.
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.98,
                                            method='adagrad')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_MLogRAdagradOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 1187)
        self.assertEqual(self.classifier.iterations[1], 317)
        self.assertEqual(self.classifier.iterations[2], 436)

    def test_MLogRAdagradOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -7.3539416411807474, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               6.203333163198617, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               3.2839577904188673, delta=0.001)

    def test_MLogRAdagradOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -33.083261965268775, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -60.5014103879351, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -26.87955154897393, delta=0.001)

    def test_MLogRAdagradOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRAdagradOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.01683721954111243, delta=0.001)

    def test_MLogRAdagradOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionAdadeltaOpt(unittest.TestCase):
    """Pin multiclass LogisticRegression with the Adadelta optimiser."""

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # Fixed the stray `cls .X` (space before attribute) typo.
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, learning_rate=1,
                                            alpha=0.93, method='adadelta')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    # Test names corrected from the copy-pasted "Adagrad" prefix so
    # failures are attributed to the right optimiser suite.
    def test_MLogRAdadeltaOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 50)
        self.assertEqual(self.classifier.iterations[1], 31)
        self.assertEqual(self.classifier.iterations[2], 59)

    def test_MLogRAdadeltaOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -17.69287148773692, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               8.833657315503745, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               3.612736014955706, delta=0.001)

    def test_MLogRAdadeltaOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -19.461391357431822, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -58.89195462566389, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -22.090057493027633, delta=0.001)

    def test_MLogRAdadeltaOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRAdadeltaOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.0008593472329257797, delta=0.001)

    def test_MLogRAdadeltaOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionRMSpropOpt(unittest.TestCase):
    """Pin multiclass LogisticRegression with the RMSprop optimiser."""

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # Fixed the stray `cls .X` (space before attribute) typo.
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, learning_rate=1,
                                            alpha=0.99, method='rmsprop')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    # Test names corrected from the copy-pasted "Adagrad" prefix so
    # failures are attributed to the right optimiser suite.
    def test_MLogRRMSpropOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 212)
        self.assertEqual(self.classifier.iterations[1], 32)
        self.assertEqual(self.classifier.iterations[2], 71)

    def test_MLogRRMSpropOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1],
                               -15.450318804509942, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1],
                               9.072690798494117, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1],
                               4.915283731375461, delta=0.001)

    def test_MLogRRMSpropOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1],
                               -19.841625448780448, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1],
                               -58.50817514694194, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1],
                               -21.77574545155869, delta=0.001)

    def test_MLogRRMSpropOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)

    def test_MLogRRMSpropOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0],
                               0.0009482861157384186, delta=0.001)

    def test_MLogRRMSpropOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test),
                               0.9833333333333333, delta=0.001)
| 49.643454
| 116
| 0.693805
| 2,267
| 17,822
| 5.326864
| 0.096162
| 0.110136
| 0.155267
| 0.182594
| 0.810782
| 0.800182
| 0.704786
| 0.696837
| 0.674975
| 0.618334
| 0
| 0.135691
| 0.181237
| 17,822
| 358
| 117
| 49.782123
| 0.691886
| 0
| 0
| 0.361217
| 0
| 0
| 0.005443
| 0.001234
| 0
| 0
| 0
| 0
| 0.422053
| 1
| 0.277567
| false
| 0
| 0.019011
| 0
| 0.338403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e16de0aa3cf48c1190b720bbb78c8f0c466d307
| 298
|
py
|
Python
|
ProyectoFinal/usuarios/forms.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
ProyectoFinal/usuarios/forms.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
ProyectoFinal/usuarios/forms.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
"""Users forms."""
# Django
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from .models import *
| 29.8
| 74
| 0.822148
| 38
| 298
| 6.447368
| 0.315789
| 0.204082
| 0.277551
| 0.342857
| 0.261224
| 0.261224
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104027
| 298
| 9
| 75
| 33.111111
| 0.917603
| 0.067114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7d05b88855588cb8410b26233b2a89d79d3ac47e
| 125,035
|
py
|
Python
|
tacker/tests/unit/vnflcm/test_controller.py
|
SSU-DCN/tacker
|
d886ac7fec3d9cf6e0cefc5d2fa89a733a5255ae
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnflcm/test_controller.py
|
SSU-DCN/tacker
|
d886ac7fec3d9cf6e0cefc5d2fa89a733a5255ae
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnflcm/test_controller.py
|
SSU-DCN/tacker
|
d886ac7fec3d9cf6e0cefc5d2fa89a733a5255ae
|
[
"Apache-2.0"
] | 1
|
2020-11-16T02:14:35.000Z
|
2020-11-16T02:14:35.000Z
|
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import ddt
from http import client as http_client
import json
import os
from unittest import mock
import urllib
import webob
from webob import exc
from oslo_serialization import jsonutils
from tacker.api.vnflcm.v1 import controller
from tacker.api.vnflcm.v1 import sync_resource
from tacker.common import exceptions
import tacker.conductor.conductorrpc.vnf_lcm_rpc as vnf_lcm_rpc
from tacker import context
import tacker.db.vnfm.vnfm_db
from tacker.extensions import nfvo
from tacker.extensions import vnfm
from tacker.manager import TackerManager
from tacker import objects
from tacker.objects import fields
from tacker.tests import constants
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.tests.unit import fake_request
import tacker.tests.unit.nfvo.test_nfvo_plugin as test_nfvo_plugin
from tacker.tests.unit.vnflcm import fakes
from tacker.tests import uuidsentinel
import tacker.vnfm.nfvo_client as nfvo_client
from tacker.vnfm import vim_client
class FakeVimClient(mock.Mock):
    """Stand-in for vim_client.VimClient; behaves as a plain Mock subclass."""
def _get_template(name):
    """Return the UTF-8 text of sample file *name* under ../../etc/samples.

    Raises OSError if the file is missing and UnicodeDecodeError if it is
    not valid UTF-8 (errors='strict').
    """
    filename = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '../../etc/samples/' + str(name)))
    # Use a context manager so the handle is always closed; the original
    # leaked the open file object.
    with codecs.open(filename, encoding='utf-8', errors='strict') as f:
        return f.read()
class FakeVNFMPlugin(mock.Mock):
    """Canned VNFM plugin double.

    Returns fixed ids, templates and dummy VNF records so controller tests
    can exercise lookup paths without a database.
    """

    def __init__(self):
        super(FakeVNFMPlugin, self).__init__()
        # Hard-coded UUIDs reused by the fake lookup methods below.
        self.vnf1_vnfd_id = 'eb094833-995e-49f0-a047-dfb56aaf7c4e'
        self.vnf1_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effe07'
        self.vnf1_update_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6'
        # NOTE(review): vnf2_vnfd_id and vnf3_vnfd_id are identical —
        # possibly intentional, but looks like a copy-paste; confirm.
        self.vnf2_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
        self.vnf3_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
        self.vnf3_vnf_id = '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'
        self.vnf3_update_vnf_id = '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'
        # Connection-point ids for the detail helpers.
        self.cp11_id = 'd18c8bae-898a-4932-bff8-d5eac981a9c9'
        self.cp11_update_id = 'a18c8bae-898a-4932-bff8-d5eac981a9b8'
        self.cp12_id = 'c8906342-3e30-4b2a-9401-a251a7a9b5dd'
        self.cp12_update_id = 'b8906342-3e30-4b2a-9401-a251a7a9b5cc'
        self.cp32_id = '3d1bd2a2-bf0e-44d1-87af-a2c6b2cad3ed'
        self.cp32_update_id = '064c0d99-5a61-4711-9597-2a44dc5da14b'

    def get_vnfd(self, *args, **kwargs):
        # Return a VNFD record (with template text) keyed by positional name.
        # Any name other than VNF1/VNF2 implicitly returns None.
        if 'VNF1' in args:
            return {'id': self.vnf1_vnfd_id,
                    'name': 'VNF1',
                    'attributes': {'vnfd': _get_template(
                        'test-nsd-vnfd1.yaml')}}
        elif 'VNF2' in args:
            # NOTE(review): 'VNF2' answers with vnf3_vnfd_id — confirm.
            return {'id': self.vnf3_vnfd_id,
                    'name': 'VNF2',
                    'attributes': {'vnfd': _get_template(
                        'test-nsd-vnfd2.yaml')}}

    def get_vnfds(self, *args, **kwargs):
        # Filter-style lookup: expects a {'name': [...]} dict in args.
        if {'name': ['VNF1']} in args:
            return [{'id': self.vnf1_vnfd_id}]
        elif {'name': ['VNF3']} in args:
            return [{'id': self.vnf3_vnfd_id}]
        else:
            return []

    def get_vnfs(self, *args, **kwargs):
        # Filter-style lookup by vnfd_id; unknown filters yield None
        # (note: not an empty list, unlike get_vnfds above).
        if {'vnfd_id': [self.vnf1_vnfd_id]} in args:
            return [{'id': self.vnf1_vnf_id}]
        elif {'vnfd_id': [self.vnf3_vnfd_id]} in args:
            return [{'id': self.vnf3_vnf_id}]
        else:
            return None

    def get_vnf(self, *args, **kwargs):
        # vnf1 -> ERROR record, vnf3 -> raises VNFNotFound,
        # anything else -> ACTIVE record.
        if self.vnf1_vnf_id in args:
            return self.get_dummy_vnf_error()
        elif self.vnf3_vnf_id in args:
            return self.get_dummy_vnf_not_error()
        else:
            return self.get_dummy_vnf_active()

    def get_vnf_resources(self, *args, **kwargs):
        # Connection-point listings per known vnf id; unknown ids -> None.
        if self.vnf1_vnf_id in args:
            return self.get_dummy_vnf1_details()
        elif self.vnf1_update_vnf_id in args:
            return self.get_dummy_vnf1_update_details()
        elif self.vnf3_vnf_id in args:
            return self.get_dummy_vnf3_details()
        elif self.vnf3_update_vnf_id in args:
            return self.get_dummy_vnf3_update_details()

    def get_dummy_vnf1_details(self):
        return [{'name': 'CP11', 'id': self.cp11_id},
                {'name': 'CP12', 'id': self.cp12_id}]

    def get_dummy_vnf1_update_details(self):
        return [{'name': 'CP11', 'id': self.cp11_update_id},
                {'name': 'CP12', 'id': self.cp12_update_id}]

    def get_dummy_vnf3_details(self):
        return [{'name': 'CP32', 'id': self.cp32_id}]

    def get_dummy_vnf3_update_details(self):
        return [{'name': 'CP32', 'id': self.cp32_update_id}]

    def get_dummy_vnf_active(self):
        # Dummy VNF db record in ACTIVE state.
        return {'tenant_id': uuidsentinel.tenant_id,
                'name': "fake_name",
                'vnfd_id': uuidsentinel.vnfd_id,
                'vnf_instance_id': uuidsentinel.instance_id,
                'mgmt_ip_address': "fake_mgmt_ip_address",
                'status': 'ACTIVE',
                'description': 'fake_description',
                'placement_attr': 'fake_placement_attr',
                'vim_id': 'uuidsentinel.vim_id',
                'error_reason': 'fake_error_reason',
                'attributes': {
                    "scale_group": '{"scaleGroupDict":' +
                                   '{"SP1": {"maxLevel" : 3}}}'}}

    def get_dummy_vnf_error(self):
        # Same record as above but in ERROR state.
        return {'tenant_id': uuidsentinel.tenant_id,
                'name': "fake_name",
                'vnfd_id': uuidsentinel.vnfd_id,
                'vnf_instance_id': uuidsentinel.instance_id,
                'mgmt_ip_address': "fake_mgmt_ip_address",
                'status': 'ERROR',
                'description': 'fake_description',
                'placement_attr': 'fake_placement_attr',
                'vim_id': 'uuidsentinel.vim_id',
                'error_reason': 'fake_error_reason',
                'attributes': {
                    "scale_group": '{"scaleGroupDict":' +
                                   '{"SP1": {"maxLevel" : 3}}}'}}

    def get_dummy_vnf_not_error(self):
        # NOTE(review): `_` (the gettext alias) is not imported in this
        # module — presumably installed as a builtin elsewhere in the test
        # suite; confirm, otherwise this raises NameError, not VNFNotFound.
        msg = _('VNF %(vnf_id)s could not be found')
        raise vnfm.VNFNotFound(explanation=msg)
@ddt.ddt
class TestController(base.TestCase):
    def setUp(self):
        """Patch the service-plugin registry and build the controller under test."""
        super(TestController, self).setUp()
        # Patch TackerManager.get_service_plugins for the whole test so
        # controller construction sees the fake VNFM plugin; stopped in
        # tearDown via self.mock_manager.
        self.patcher = mock.patch(
            'tacker.manager.TackerManager.get_service_plugins',
            return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
        self.mock_manager = self.patcher.start()
        self.controller = controller.VnfLcmController()
        # Minimal VIM connection info consumed by the create tests.
        self.vim_info = {
            'vim_id': uuidsentinel.vnfd_id,
            'vim_type': 'test',
            'vim_auth': {'username': 'test', 'password': 'test'},
            'placement_attr': {'region': 'TestRegionOne'},
            'tenant': 'test'
        }
        self.context = context.get_admin_context()
        # NOTE(review): the controller is rebuilt here with get_vnfs stubbed
        # to an empty list, shadowing the instance created above — the first
        # construction looks redundant; confirm before removing.
        with mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, 'get_vnfs',
                               return_value=[]):
            with mock.patch.object(TackerManager, 'get_service_plugins',
                                   return_value={'VNFM':
                                       test_nfvo_plugin.FakeVNFMPlugin()}):
                self.controller = controller.VnfLcmController()
    def tearDown(self):
        # Stop the service-plugin patch started in setUp before the base
        # class performs its own cleanup.
        self.mock_manager.stop()
        super(TestController, self).tearDown()
    @property
    def app(self):
        # A fresh WSGI app wired with the v1 VNF LCM routes on each access.
        return fakes.wsgi_app_v1()
def _get_dummy_vnf(self, vnf_id=None, status=None):
vnf_dict = utils.get_dummy_vnf()
if status:
vnf_dict['status'] = status
if vnf_id:
vnf_dict['id'] = vnf_id
return vnf_dict
def _make_problem_detail(
self,
detail,
status,
title=None,
type=None,
instance=None):
res = webob.Response(content_type='application/problem+json')
problemDetails = {}
if type:
problemDetails['type'] = type
if title:
problemDetails['title'] = title
problemDetails['detail'] = detail
problemDetails['status'] = status
if instance:
problemDetails['instance'] = instance
res.text = json.dumps(problemDetails)
res.status_int = status
return res
    # Decorator-injected mocks are passed bottom-up as positional args.
    @mock.patch.object(vim_client.VimClient, "get_vim")
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._update_package_usage_state')
    @mock.patch.object(objects.VnfPackage, 'get_by_id')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._create_vnf')
    @mock.patch.object(objects.vnf_package.VnfPackage, 'save')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                           test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
    @mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
    def test_create_without_name_and_description(
            self, mock_get_by_id,
            mock_vnf_instance_create,
            mock_get_service_plugins,
            mock_package_save,
            mock_private_create_vnf,
            mock_vnf_package_get_by_id,
            mock_update_package_usage_state,
            mock_get_vim):
        """POST /vnf_instances returns 201 with the created instance body."""
        # Wire up the canned lookups the create path depends on.
        mock_get_vim.return_value = self.vim_info
        mock_get_by_id.return_value = fakes.return_vnf_package_vnfd()
        mock_vnf_package_get_by_id.return_value = \
            fakes.return_vnf_package_with_deployment_flavour()
        updates = {'vnfd_id': uuidsentinel.vnfd_id,
                   'vnf_instance_description': 'SampleVnf Description',
                   'vnf_instance_name': 'SampleVnf',
                   'vnf_pkg_id': uuidsentinel.vnf_pkg_id,
                   'vnf_metadata': {"key": "value"}}
        mock_vnf_instance_create.return_value =\
            fakes.return_vnf_instance_model(**updates)
        body = {'vnfdId': uuidsentinel.vnfd_id,
                "vnfInstanceName": "SampleVnf",
                "vnfInstanceDescription": "SampleVnf Description",
                'metadata': {"key": "value"}}
        req = fake_request.HTTPRequest.blank('/vnf_instances')
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        # Call Create API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CREATED, resp.status_code)
        # Response body must echo the instance and point at it via Location.
        updates = {"vnfInstanceName": "SampleVnf",
                   "vnfInstanceDescription": "SampleVnf Description"}
        expected_vnf = fakes.fake_vnf_instance_response(**updates)
        location_header = ('http://localhost/vnflcm/v1/vnf_instances/%s'
                           % resp.json['id'])
        self.assertEqual(expected_vnf, resp.json)
        self.assertEqual(location_header, resp.headers['location'])
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data(
    {'attribute': 'vnfdId', 'value': True,
     'expected_type': 'uuid'},
    {'attribute': 'vnfdId', 'value': 123,
     'expected_type': 'uuid'},
    {'attribute': 'vnfInstanceName', 'value': True,
     'expected_type': "name_allow_zero_min_length"},
    {'attribute': 'vnfInstanceName', 'value': 123,
     'expected_type': "name_allow_zero_min_length"},
    {'attribute': 'vnfInstanceDescription', 'value': True,
     'expected_type': 'description'},
    {'attribute': 'vnfInstanceDescription', 'value': 123,
     'expected_type': 'description'}
)
@ddt.unpack
def test_create_with_invalid_request_body(
        self, mock_get_service_plugins, attribute, value, expected_type):
    """value of attribute in body is of invalid type

    Each ddt scenario replaces one valid attribute with a boolean or an
    integer and expects the create call to raise ValidationError naming
    that attribute.
    """
    body = {"vnfInstanceName": "SampleVnf",
            "vnfdId": "29c770a3-02bc-4dfc-b4be-eb173ac00567",
            "vnfInstanceDescription": "VNF Description",
            "metadata": {"key": "value"}}
    req = fake_request.HTTPRequest.blank('/vnf_instances')
    # Corrupt exactly one attribute for this scenario.
    body.update({attribute: value})
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    exception = self.assertRaises(
        exceptions.ValidationError, self.controller.create,
        req, body=body)
    # 'uuid', 'name_allow_zero_min_length' and 'description' are all
    # string-based schema formats, so the validator reports the same
    # "not of type 'string'" message for each; only an 'object'
    # scenario would report 'object'.  The original code duplicated the
    # identical message in two branches and left expected_message
    # unbound (a NameError) for any other expected_type; a single
    # mapping avoids both problems.
    json_type = 'object' if expected_type == 'object' else 'string'
    expected_message = ("Invalid input for field/attribute "
                        "{attribute}. Value: {value}. {value} is not "
                        "of type '{json_type}'".
                        format(value=value, attribute=attribute,
                               json_type=json_type))
    self.assertEqual(expected_message, exception.msg)
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_non_existing_vnf_package_vnfd(self, mock_vnf_by_id,
                                              mock_get_service_plugins,
                                              mock_index,
                                              mock_create_package):
    """Create raises HTTPBadRequest when the local vnfd lookup fails.

    The local VnfPackageVnfd lookup raises VnfPackageVnfdNotFound; even
    though the mocked NFVO index call and the package sync both succeed,
    the create call is still expected to fail with HTTPBadRequest.
    """
    mock_vnf_by_id.side_effect = exceptions.VnfPackageVnfdNotFound
    mock_create_package.return_value = fakes.return_vnf_package_vnfd()
    # Fake a successful NFVO package index response.
    mock_response = mock.MagicMock()
    mock_response.ok = True
    mock_response.json = mock.MagicMock()
    mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
    mock_index.return_value = mock_response
    body = {'vnfdId': uuidsentinel.vnfd_id,
            'metadata': {"key": "value"}}
    req = fake_request.HTTPRequest.blank('/vnf_instances')
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    self.assertRaises(exc.HTTPBadRequest, self.controller.create, req,
                      body=body)
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._update_package_usage_state')
@mock.patch.object(objects.VnfPackage, 'get_by_id')
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._create_vnf')
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_vnf_package_not_found(
        self, mock_get_by_id_package_vnfd,
        mock_vnf_instance_create,
        mock_index, mock_create_pkg,
        mock_get_service_plugins,
        mock_private_create_vnf,
        mock_vnf_package_get_by_id,
        mock_update_package_usage_state,
        mock_get_vim):
    """Create still returns 201 when the missing package can be synced.

    The local vnfd lookup fails with VnfPackageVnfdNotFound, but the
    mocked NFVO index call succeeds and SyncVnfPackage.create_package
    supplies the package vnfd, so the instance is created anyway.
    """
    mock_get_by_id_package_vnfd.side_effect =\
        exceptions.VnfPackageVnfdNotFound
    # Fake a successful NFVO package index response.
    mock_response = mock.MagicMock()
    mock_response.ok = True
    mock_response.json = mock.MagicMock()
    mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
    mock_index.return_value = mock_response
    mock_create_pkg.return_value = fakes.return_vnf_package_vnfd()
    updates = {'vnfd_id': uuidsentinel.vnfd_id}
    mock_vnf_instance_create.return_value =\
        fakes.return_vnf_instance_model(**updates)
    body = {'vnfdId': uuidsentinel.vnfd_id}
    req = fake_request.HTTPRequest.blank('/vnf_instances')
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    req.environ['tacker.context'] = self.context
    # Call Create API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.CREATED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_vnf_package_vnfd_not_found(
        self, mock_get_by_id_package_vnfd,
        mock_vnf_instance_create,
        mock_index, mock_create_pkg,
        mock_get_service_plugins):
    """Create returns 500 when the package sync produces no vnfd.

    The local lookup fails and SyncVnfPackage.create_package returns
    None, leaving the controller without a usable package vnfd.
    """
    mock_get_by_id_package_vnfd.side_effect =\
        exceptions.VnfPackageVnfdNotFound
    # Fake a successful NFVO package index response.
    mock_response = mock.MagicMock()
    mock_response.ok = True
    mock_response.json = mock.MagicMock()
    mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
    mock_index.return_value = mock_response
    mock_create_pkg.return_value = None
    updates = {'vnfd_id': uuidsentinel.vnfd_id}
    mock_vnf_instance_create.return_value =\
        fakes.return_vnf_instance_model(**updates)
    body = {'vnfdId': uuidsentinel.vnfd_id}
    req = fake_request.HTTPRequest.blank('/vnf_instances')
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Create API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_non_vnf_package_info(
        self, mock_get_by_id_package_vnfd,
        mock_vnf_instance_create,
        mock_index, mock_get_service_plugins):
    """Create returns 404 when the NFVO has no package information.

    The local vnfd lookup fails and the mocked NFVO index response is
    not ok (empty payload), so there is nothing to sync.
    """
    mock_get_by_id_package_vnfd.side_effect =\
        exceptions.VnfPackageVnfdNotFound
    # NFVO index lookup fails: response not ok, no package data.
    mock_response = mock.MagicMock()
    mock_response.ok = False
    mock_response.json = mock.MagicMock()
    mock_response.json.return_value = {}
    mock_index.return_value = mock_response
    updates = {'vnfd_id': uuidsentinel.vnfd_id}
    mock_vnf_instance_create.return_value =\
        fakes.return_vnf_instance_model(**updates)
    body = {'vnfdId': uuidsentinel.vnfd_id}
    req = fake_request.HTTPRequest.blank('/vnf_instances')
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Create API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.NOT_FOUND, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
def test_create_without_vnfd_id(self, mock_get_service_plugins):
    """Creating a VNF instance without the mandatory vnfdId fails."""
    request = fake_request.HTTPRequest.blank('/vnf_instances')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    # 'vnfdId' is deliberately absent from the request body.
    request_body = {"vnfInstanceName": "SampleVnfInstance",
                    "metadata": {"key": "value"}}
    request.body = jsonutils.dump_as_bytes(request_body)
    response = request.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, response.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('PATCH', 'PUT', 'HEAD', 'DELETE')
def test_create_not_allowed_http_method(self, method,
                                        mock_get_service_plugins):
    """The create endpoint accepts only POST; other verbs get 405."""
    request = fake_request.HTTPRequest.blank('/vnf_instances')
    request.method = method
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        {"vnfdId": uuidsentinel.vnfd_id})
    response = request.get_response(self.app)
    self.assertEqual(http_client.METHOD_NOT_ALLOWED,
                     response.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data({'name': "A" * 256,
           'description': "VNF Description",
           'meta': {"key": "value"}},
          {'name': 'Fake-VNF',
           'description': "A" * 1025,
           'meta': {"key": "value"}},
          {'name': 'Fake-VNF',
           'description': "VNF Description",
           'meta': {"key": "v" * 256}})
def test_create_max_length_exceeded_for_vnf_name_and_description(
        self, values, mock_get_service_plugins):
    """Over-long name, description or metadata value yields 400.

    Each ddt scenario pushes exactly one field past its schema limit:
    name > 255, description > 1024, or a metadata value > 255 chars.
    """
    name = values['name']
    meta = values['meta']
    description = values['description']
    # vnf instance_name and description with length greater than max
    # length defined
    body = {"vnfInstanceName": name,
            "vnfdId": uuidsentinel.vnfd_id,
            "vnfInstanceDescription": description,
            'metadata': meta}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances')
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfInstance, "save")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
@mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "instantiate")
def test_instantiate_with_deployment_flavour(
        self, mock_instantiate, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id, mock_save,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_insta_notfi_process,
        mock_get_service_plugins):
    """A valid flavourId yields 202 and one RPC instantiate call."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    # The controller's own _get_vnf lookup yields an INACTIVE vnf.
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # 'simple' matches the fake package's deployment flavour.
    body = {"flavourId": "simple"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.ACCEPTED, resp.status_code)
    mock_instantiate.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
def test_instantiate_with_non_existing_deployment_flavour(
        self, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id,
        mock_vnf_instance_get_by_id, mock_get_vnf,
        mock_get_service_plugins):
    """An unknown flavourId is rejected with 400 and a clear message."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # 'invalid' does not match any flavour in the fake package.
    body = {"flavourId": "invalid"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("No flavour with id 'invalid'.",
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfInstance, "save")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
@mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "instantiate")
def test_instantiate_with_instantiation_level(
        self, mock_instantiate, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id, mock_save,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_insta_notif_process,
        mock_get_service_plugins):
    """A valid instantiationLevelId yields 202 with one notification."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # Both the flavour and the level exist in the fake package.
    body = {"flavourId": "simple",
            "instantiationLevelId": "instantiation_level_1"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.ACCEPTED, resp.status_code)
    mock_instantiate.assert_called_once()
    mock_get_vnf.assert_called_once()
    mock_insta_notif_process.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfInstance, "save")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
@mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "instantiate")
def test_instantiate_with_no_inst_level_in_flavour(
        self, mock_instantiate, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id, mock_save,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_get_service_plugins):
    """Requesting a level from a flavour that defines none yields 400."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    # Strip the instantiation levels from the fake package's flavour.
    vnf_package = fakes.return_vnf_package_with_deployment_flavour()
    vnf_package.vnf_deployment_flavours[0].instantiation_levels = None
    mock_vnf_package_get_by_id.return_value = vnf_package
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # No instantiation level in deployment flavour but it's passed in the
    # request
    body = {"flavourId": "simple",
            "instantiationLevelId": "instantiation_level_1"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("No instantiation level with id "
                     "'instantiation_level_1'.",
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
@mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "instantiate")
def test_instantiate_with_non_existing_instantiation_level(
        self, mock_instantiate, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id,
        mock_vnf_instance_get_by_id, mock_get_vnf,
        mock_get_service_plugins):
    """An unknown instantiationLevelId is rejected with 400."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # The flavour exists but 'non-existing' is not one of its levels.
    body = {"flavourId": "simple",
            "instantiationLevelId": "non-existing"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("No instantiation level with id 'non-existing'.",
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.VnfLcmController.'
            '_notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfInstance, "save")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
@mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "instantiate")
def test_instantiate_with_vim_connection(
        self, mock_instantiate, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id, mock_save,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_insta_notif_process,
        mock_get_service_plugins):
    """Instantiate with explicit vimConnectionInfo returns 202."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    # The request carries its own VIM connection instead of relying on
    # a default VIM.
    body = {"flavourId": "simple",
            "vimConnectionInfo": [
                {"id": uuidsentinel.vim_connection_id,
                 "vimId": uuidsentinel.vim_id,
                 "vimType": 'openstack'}
            ]}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.ACCEPTED, resp.status_code)
    mock_instantiate.assert_called_once()
    mock_get_vnf.assert_called_once()
    mock_insta_notif_process.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
def test_instantiate_with_non_existing_vim(
        self, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_get_service_plugins):
    """Instantiate fails with 400 when the referenced VIM is unknown."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    # VIM resolution fails for the supplied vimId.
    mock_get_vim.side_effect = nfvo.VimNotFoundException
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    body = {"flavourId": "simple",
            "vimConnectionInfo": [
                {"id": uuidsentinel.vim_connection_id,
                 "vimId": uuidsentinel.vim_id,
                 "vimType": 'openstack'}
            ]}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("VimConnection id is not found: %s" %
                     uuidsentinel.vim_id,
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
def test_instantiate_with_non_existing_region_vim(
        self, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_get_service_plugins):
    """Instantiate fails with 400 when the VIM region is not found."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    # VIM exists but the requested region does not.
    mock_get_vim.side_effect = nfvo.VimRegionNotFoundException
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    body = {"flavourId": "simple",
            "vimConnectionInfo": [
                {'id': uuidsentinel.vim_connection_id,
                 'vimId': uuidsentinel.vim_id,
                 'vimType': 'openstack',
                 'accessInfo': {"region": 'region_non_existing'}}
            ]}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("Region not found for the VimConnection: %s" %
                     uuidsentinel.vim_id,
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
@mock.patch.object(objects.VnfPackage, "get_by_id")
def test_instantiate_with_default_vim_not_configured(
        self, mock_vnf_package_get_by_id,
        mock_vnf_package_vnfd_get_by_id,
        mock_vnf_instance_get_by_id, mock_get_vim,
        mock_get_vnf, mock_get_service_plugins):
    """With no vimConnectionInfo and no default VIM, return 400."""
    mock_vnf_instance_get_by_id.return_value =\
        fakes.return_vnf_instance_model()
    mock_vnf_package_vnfd_get_by_id.return_value = \
        fakes.return_vnf_package_vnfd()
    mock_vnf_package_get_by_id.return_value = \
        fakes.return_vnf_package_with_deployment_flavour()
    # No default VIM is defined in the (mocked) deployment.
    mock_get_vim.side_effect = nfvo.VimDefaultNotDefined
    mock_get_vnf.return_value = \
        self._get_dummy_vnf(
            vnf_id=mock_vnf_instance_get_by_id.return_value.id,
            status='INACTIVE')
    body = {"flavourId": "simple"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertEqual("Default VIM is not defined.",
                     resp.json['badRequest']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
def test_instantiate_incorrect_instantiation_state(
        self, mock_vnf_by_id, mock_get_vnf, mock_get_service_plugins):
    """Instantiating an already-INSTANTIATED vnf yields 409 Conflict."""
    instance = fakes.return_vnf_instance_model()
    instance.instantiation_state = 'INSTANTIATED'
    mock_vnf_by_id.return_value = instance
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes({"flavourId": "simple"})
    # Call Instantiate API
    response = request.get_response(self.app)
    self.assertEqual(http_client.CONFLICT, response.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
def test_instantiate_incorrect_task_state(
        self,
        mock_vnf_by_id,
        mock_get_vnf,
        mock_get_service_plugins):
    """A vnf already in task_state INSTANTIATING cannot be instantiated."""
    mock_vnf_by_id.return_value = fakes.return_vnf_instance_model(
        task_state=fields.VnfInstanceTaskState.INSTANTIATING)
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes({"flavourId": "simple"})
    response = request.get_response(self.app)
    self.assertEqual(http_client.CONFLICT, response.status_code)
    expected_msg = ("Vnf instance %s in task_state INSTANTIATING. Cannot "
                    "instantiate while the vnf instance is in this state.")
    self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                     response.json['conflictingRequest']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data({'attribute': 'flavourId', 'value': 123,
           'expected_type': 'string'},
          {'attribute': 'flavourId', 'value': True,
           'expected_type': 'string'},
          {'attribute': 'instantiationLevelId', 'value': 123,
           'expected_type': 'string'},
          {'attribute': 'instantiationLevelId', 'value': True,
           'expected_type': 'string'},
          {'attribute': 'additionalParams', 'value': ['val1', 'val2'],
           'expected_type': 'object'},
          {'attribute': 'additionalParams', 'value': True,
           'expected_type': 'object'},
          {'attribute': 'additionalParams', 'value': 123,
           'expected_type': 'object'},
          )
@ddt.unpack
def test_instantiate_with_invalid_request_body(
        self, mock_get_service_plugins, attribute, value, expected_type):
    """Each ddt scenario corrupts one attribute of a valid instantiate
    body and expects a ValidationError quoting the schema type."""
    body = fakes.get_vnf_instantiation_request_body()
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    # Corrupt exactly one attribute for this scenario.
    body.update({attribute: value})
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    exception = self.assertRaises(
        exceptions.ValidationError, self.controller.instantiate,
        req, body=body)
    expected_message = \
        ("Invalid input for field/attribute {attribute}. Value: {value}. "
         "{value} is not of type '{expected_type}'".
         format(value=value, attribute=attribute,
                expected_type=expected_type))
    self.assertEqual(expected_message, exception.msg)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
def test_instantiate_without_flavour_id(self,
                                        mock_get_service_plugins):
    """An empty instantiate body is rejected: flavourId is required."""
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    # Deliberately send an empty JSON object.
    request.body = jsonutils.dump_as_bytes({})
    response = request.get_response(self.app)
    self.assertEqual(http_client.BAD_REQUEST, response.status_code)
    self.assertEqual("'flavourId' is a required property",
                     response.json['badRequest']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
def test_instantiate_invalid_request_parameter(self,
                                               mock_get_service_plugins):
    """An unknown body property makes the instantiate request fail.

    The request carries a valid flavourId plus one property the schema
    does not define; the API responds with 500.  (The original test
    assigned ``body`` twice with identical content; the dead first
    assignment has been removed.)
    """
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    # Pass invalid request parameter
    body = {"flavourId": "simple"}
    body.update({'additional_property': 'test_value'})
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
def test_instantiate_with_invalid_uuid(self,
                                       mock_get_service_plugins):
    """Instantiate on a malformed vnf id returns 404 Not Found."""
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % constants.INVALID_UUID)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes({"flavourId": "simple"})
    # Call Instantiate API
    response = request.get_response(self.app)
    self.assertEqual(http_client.NOT_FOUND, response.status_code)
    self.assertEqual(
        "Can not find requested vnf: %s" % constants.INVALID_UUID,
        response.json['itemNotFound']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
            'VnfLcmController._get_vnf')
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_instantiate_with_non_existing_vnf_instance(
        self, mock_vnf_by_id, mock_get_vnf,
        mock_get_service_plugins):
    """Instantiate on a missing vnf instance returns 404."""
    mock_vnf_by_id.side_effect = exceptions.VnfInstanceNotFound
    body = {"flavourId": "simple"}
    req = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s/instantiate' % uuidsentinel.vnf_instance_id)
    req.body = jsonutils.dump_as_bytes(body)
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    # Call Instantiate API
    resp = req.get_response(self.app)
    self.assertEqual(http_client.NOT_FOUND, resp.status_code)
    self.assertEqual("Can not find requested vnf instance: %s" %
                     uuidsentinel.vnf_instance_id,
                     resp.json['itemNotFound']['message'])
    mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH', 'GET')
def test_instantiate_invalid_http_method(self, method,
                                         mock_get_service_plugins):
    """Only POST is accepted by the instantiate endpoint."""
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/29c770a3-02bc-4dfc-b4be-eb173ac00567/instantiate')
    request.method = method
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(
        fakes.get_vnf_instantiation_request_body())
    response = request.get_response(self.app)
    self.assertEqual(http_client.METHOD_NOT_ALLOWED,
                     response.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
def test_show_vnf_not_instantiated(self, mock_vnf_by_id,
                                   mock_get_service_plugins):
    """Show returns the NOT_INSTANTIATED representation of a vnf."""
    mock_vnf_by_id.return_value = fakes.return_vnf_instance_model()
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s' % uuidsentinel.instance_id)
    actual = self.controller.show(request, uuidsentinel.instance_id)
    self.assertEqual(fakes.fake_vnf_instance_response(), actual)
@mock.patch.object(TackerManager, 'get_service_plugins',
                   return_value={'VNFM':
                                 test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_show_vnf_instantiated(self, mock_vnf_by_id,
                               mock_get_service_plugins):
    """Show reflects the INSTANTIATED state in its response."""
    state = fields.VnfInstanceState.INSTANTIATED
    mock_vnf_by_id.return_value = fakes.return_vnf_instance(state)
    request = fake_request.HTTPRequest.blank(
        '/vnf_instances/%s' % uuidsentinel.instance_id)
    actual = self.controller.show(request, uuidsentinel.instance_id)
    self.assertEqual(fakes.fake_vnf_instance_response(state), actual)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
    def test_show_with_non_existing_vnf_instance(self, mock_vnf_by_id,
                                                 mock_get_service_plugins):
        """GET of an id whose DB lookup raises VnfInstanceNotFound is 404."""
        mock_vnf_by_id.side_effect = exceptions.VnfInstanceNotFound
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % uuidsentinel.vnf_instance_id)
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
        self.assertEqual("Can not find requested vnf instance: %s" %
                         uuidsentinel.vnf_instance_id,
                         resp.json['itemNotFound']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
def test_show_with_invalid_uuid(self,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % constants.INVALID_UUID)
resp = req.get_response(self.app)
self.assertEqual(http_client.NOT_FOUND, resp.status_code)
self.assertEqual("Can not find requested vnf instance: %s" %
constants.INVALID_UUID,
resp.json['itemNotFound']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'POST')
def test_show_invalid_http_method(self, http_method,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % constants.UUID)
req.headers['Content-Type'] = 'application/json'
req.method = http_method
resp = req.get_response(self.app)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._notification_process')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(objects.VnfInstance, "save")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "terminate")
    @ddt.data({'terminationType': 'FORCEFUL'},
              {'terminationType': 'GRACEFUL'},
              {'terminationType': 'GRACEFUL',
               'gracefulTerminationTimeout': 10})
    def test_terminate(self, body, mock_terminate, mock_save,
                       mock_get_by_id, mock_get_vnf,
                       mock_notification_process,
                       mock_get_service_plugins):
        """Terminate on an ACTIVE, INSTANTIATED VNF returns 202.

        Each ddt body is a valid terminate request; the RPC terminate,
        the vnf lookup and the notification hook must each fire once.
        """
        vnf_instance_obj = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_get_by_id.return_value = vnf_instance_obj
        mock_get_vnf.return_value = \
            self._get_dummy_vnf(vnf_id=vnf_instance_obj.id, status='ACTIVE')
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_terminate.assert_called_once()
        mock_get_vnf.assert_called_once()
        mock_notification_process.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @ddt.data(
        {'attribute': 'terminationType', 'value': "TEST",
         'expected_type': 'enum'},
        {'attribute': 'terminationType', 'value': 123,
         'expected_type': 'enum'},
        {'attribute': 'terminationType', 'value': True,
         'expected_type': 'enum'},
        {'attribute': 'gracefulTerminationTimeout', 'value': True,
         'expected_type': 'integer'},
        {'attribute': 'gracefulTerminationTimeout', 'value': "test",
         'expected_type': 'integer'}
    )
    def test_terminate_with_invalid_request_body(
            self, values, mock_get_service_plugins):
        """Schema validation rejects badly-typed terminate attributes.

        Each ddt dict corrupts one attribute of an otherwise valid body;
        the controller must raise ValidationError naming that attribute
        and value.
        """
        attribute = values['attribute']
        value = values['value']
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
        # Start from a valid body, then overwrite a single attribute.
        body = {'terminationType': 'GRACEFUL',
                'gracefulTerminationTimeout': 10}
        body.update({attribute: value})
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        expected_message = ("Invalid input for field/attribute {attribute}. "
                            "Value: {value}.".
                            format(value=value, attribute=attribute))
        exception = self.assertRaises(exceptions.ValidationError,
                                      self.controller.terminate,
                                      req, constants.UUID, body=body)
        self.assertIn(expected_message, exception.msg)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
def test_terminate_missing_termination_type(self,
mock_get_service_plugins):
body = {'gracefulTerminationTimeout': 10}
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
req.body = jsonutils.dump_as_bytes(body)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
# Call terminate API
resp = req.get_response(self.app)
self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
self.assertEqual("'terminationType' is a required property",
resp.json['badRequest']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('GET', 'HEAD', 'PUT', 'DELETE', 'PATCH')
def test_terminate_invalid_http_method(self, method,
mock_get_service_plugins):
# Wrong HTTP method
body = {'terminationType': 'GRACEFUL',
'gracefulTerminationTimeout': 10}
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
req.body = jsonutils.dump_as_bytes(body)
req.headers['Content-Type'] = 'application/json'
req.method = method
resp = req.get_response(self.app)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
    def test_terminate_non_existing_vnf_instance(
            self, mock_vnf_by_id, mock_get_vnf, mock_get_service_plugins):
        """Terminate of an unknown instance id surfaces as itemNotFound."""
        body = {'terminationType': 'GRACEFUL',
                'gracefulTerminationTimeout': 10}
        mock_vnf_by_id.side_effect = exceptions.VnfInstanceNotFound
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
        self.assertEqual("Can not find requested vnf instance: %s" %
                         uuidsentinel.vnf_instance_id,
                         resp.json['itemNotFound']['message'])
        mock_get_vnf.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
    def test_terminate_incorrect_instantiation_state(
            self, mock_vnf_by_id, mock_get_vnf, mock_get_service_plugins):
        """Terminating a NOT_INSTANTIATED instance is a 409 conflict."""
        # Default fake instance is NOT_INSTANTIATED (see expected_msg).
        mock_vnf_by_id.return_value = fakes.return_vnf_instance()
        body = {"terminationType": "FORCEFUL"}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in instantiation_state "
                        "NOT_INSTANTIATED. Cannot terminate while the vnf "
                        "instance is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
        mock_get_vnf.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_terminate_incorrect_task_state(
            self,
            mock_vnf_by_id,
            mock_get_vnf,
            mock_get_service_plugins):
        """Terminate conflicts (409) while a terminate is already running."""
        vnf_instance = fakes.return_vnf_instance(
            instantiated_state=fields.VnfInstanceState.INSTANTIATED,
            task_state=fields.VnfInstanceTaskState.TERMINATING)
        mock_vnf_by_id.return_value = vnf_instance
        body = {"terminationType": "FORCEFUL"}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/terminate' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in task_state TERMINATING. Cannot "
                        "terminate while the vnf instance is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
        mock_get_vnf.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._notification_process')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(objects.VnfInstance, "save")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "heal")
    @ddt.data({'cause': 'healing'}, {})
    def test_heal(self, body, mock_rpc_heal, mock_save,
                  mock_vnf_by_id, mock_get_vnf,
                  mock_heal_notif_process,
                  mock_get_service_plugins):
        """Heal on an ACTIVE, INSTANTIATED VNF returns 202 and RPCs heal.

        Driven with and without the optional 'cause' attribute.
        """
        vnf_instance_obj = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_by_id.return_value = vnf_instance_obj
        mock_get_vnf.return_value = \
            self._get_dummy_vnf(vnf_id=vnf_instance_obj.id, status='ACTIVE')
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_rpc_heal.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
def test_heal_cause_max_length_exceeded(self,
mock_get_service_plugins):
body = {'cause': 'A' * 256}
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
req.body = jsonutils.dump_as_bytes(body)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
resp = req.get_response(self.app)
self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._notification_process')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_heal_incorrect_instantiated_state(
            self,
            mock_vnf_by_id,
            mock_get_vnf,
            mock_notif,
            mock_get_service_plugins):
        """Heal of a NOT_INSTANTIATED instance is rejected with 409."""
        vnf_instance_obj = fakes.return_vnf_instance(
            fields.VnfInstanceState.NOT_INSTANTIATED)
        mock_vnf_by_id.return_value = vnf_instance_obj
        body = {}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in instantiation_state "
                        "NOT_INSTANTIATED. Cannot heal while the vnf instance "
                        "is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._notification_process')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_heal_incorrect_task_state(self, mock_vnf_by_id, mock_get_vnf,
                                       mock_notif, mock_get_service_plugins):
        """Heal conflicts (409) when a heal is already in progress."""
        vnf_instance_obj = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED,
            task_state=fields.VnfInstanceTaskState.HEALING)
        mock_vnf_by_id.return_value = vnf_instance_obj
        body = {}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in task_state "
                        "HEALING. Cannot heal while the vnf instance "
                        "is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._notification_process')
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._get_vnf')
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_heal_with_invalid_vnfc_id(
            self,
            mock_vnf_by_id,
            mock_get_vnf,
            mock_notif,
            mock_get_service_plugins):
        """Healing a vnfcInstanceId absent from the instance is a 400."""
        vnf_instance_obj = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_by_id.return_value = vnf_instance_obj
        mock_get_vnf.return_value = \
            self._get_dummy_vnf(vnf_id=vnf_instance_obj.id, status='ACTIVE')
        # The sentinel vnfc id is not part of the fake instance.
        body = {'vnfcInstanceId': [uuidsentinel.vnfc_instance_id]}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
        expected_msg = "Vnfc id %s not present in vnf instance %s"
        self.assertEqual(expected_msg % (uuidsentinel.vnfc_instance_id,
                                         uuidsentinel.vnf_instance_id),
                         resp.json['badRequest']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH', 'GET')
def test_heal_invalid_http_method(self, method,
mock_get_service_plugins):
body = {}
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s/heal' % uuidsentinel.vnf_instance_id)
req.body = jsonutils.dump_as_bytes(body)
req.headers['Content-Type'] = 'application/json'
req.method = method
resp = req.get_response(self.app)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @ddt.data({'attribute': 'cause', 'value': 123,
               'expected_type': 'string'},
              {'attribute': 'cause', 'value': True,
               'expected_type': 'string'},
              {'attribute': 'vnfcInstanceId', 'value': 123,
               'expected_type': 'array'},
              {'attribute': 'vnfcInstanceId', 'value': True,
               'expected_type': 'array'},
              )
    @ddt.unpack
    def test_heal_with_invalid_request_body(
            self, mock_get_service_plugins, attribute, value, expected_type):
        """Heal schema validation rejects wrongly-typed attributes.

        NOTE(review): self.controller.heal is invoked without the
        instance-id positional argument; schema validation appears to
        fire before the id is needed - confirm this is intentional.
        """
        body = {}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/29c770a3-02bc-4dfc-b4be-eb173ac00567/heal')
        body.update({attribute: value})
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        exception = self.assertRaises(
            exceptions.ValidationError, self.controller.heal,
            req, body=body)
        expected_message = \
            ("Invalid input for field/attribute {attribute}. Value: {value}. "
             "{value} is not of type '{expected_type}'".
             format(value=value, attribute=attribute,
                    expected_type=expected_type))
        self.assertEqual(expected_message, exception.msg)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
def test_index(self, mock_vnf_list):
req = fake_request.HTTPRequest.blank('/vnf_instances')
vnf_instance_1 = fakes.return_vnf_instance()
vnf_instance_2 = fakes.return_vnf_instance(
fields.VnfInstanceState.INSTANTIATED)
mock_vnf_list.return_value = [vnf_instance_1, vnf_instance_2]
resp = self.controller.index(req)
expected_result = [fakes.fake_vnf_instance_response(),
fakes.fake_vnf_instance_response(
fields.VnfInstanceState.INSTANTIATED)]
self.assertEqual(expected_result, resp)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
def test_index_empty_response(self, mock_vnf_list):
req = fake_request.HTTPRequest.blank('/vnf_instances')
mock_vnf_list.return_value = []
resp = self.controller.index(req)
self.assertEqual([], resp)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH')
def test_index_invalid_http_method(self, method,
mock_get_service_plugins):
# Wrong HTTP method
req = fake_request.HTTPRequest.blank(
'/vnf_instances')
req.headers['Content-Type'] = 'application/json'
req.method = method
resp = req.get_response(self.app)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
    @mock.patch('tacker.api.vnflcm.v1.controller.'
                'VnfLcmController._delete')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
    @mock.patch.object(objects.vnf_instance, '_destroy_vnf_instance')
    def test_delete(self, mock_destroy_vnf_instance, mock_vnf_by_id,
                    mock_get_service_plugins, mock_private_delete):
        """Successful DELETE of a deletable instance returns 204."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % uuidsentinel.vnf_instance_id)
        req.method = 'DELETE'
        mock_vnf_by_id.return_value = fakes.return_vnf_instance()
        req.headers['Content-Type'] = 'application/json'
        # Call delete API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NO_CONTENT, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_delete_with_non_existing_vnf_instance(self, mock_vnf_by_id,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % uuidsentinel.vnf_instance_id)
req.method = 'DELETE'
mock_vnf_by_id.side_effect = exceptions.VnfInstanceNotFound
# Call delete API
resp = req.get_response(self.app)
self.assertEqual(http_client.NOT_FOUND, resp.status_code)
self.assertEqual("Can not find requested vnf instance: %s" %
uuidsentinel.vnf_instance_id,
resp.json['itemNotFound']['message'])
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
def test_delete_with_invalid_uuid(self, mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % constants.INVALID_UUID)
req.method = 'DELETE'
# Call delete API
resp = req.get_response(self.app)
self.assertEqual(http_client.NOT_FOUND, resp.status_code)
self.assertEqual("Can not find requested vnf instance: %s" %
constants.INVALID_UUID,
resp.json['itemNotFound']['message'])
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_delete_with_incorrect_instantiation_state(
            self, mock_vnf_by_id, mock_get_service_plugins):
        """Deleting an INSTANTIATED instance is a 409 conflict."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % uuidsentinel.vnf_instance_id)
        req.method = 'DELETE'
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_by_id.return_value = vnf_instance
        # Call delete API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in instantiation_state "
                        "INSTANTIATED. Cannot delete while the vnf instance "
                        "is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_delete_with_incorrect_task_state(self, mock_vnf_by_id,
                                              mock_get_service_plugins):
        """Deleting an instance whose task_state is ERROR is a 409."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % uuidsentinel.vnf_instance_id)
        req.method = 'DELETE'
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.NOT_INSTANTIATED,
            task_state=fields.VnfInstanceTaskState.ERROR)
        mock_vnf_by_id.return_value = vnf_instance
        # Call delete API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
        expected_msg = ("Vnf instance %s in task_state ERROR. "
                        "Cannot delete while the vnf instance "
                        "is in this state.")
        self.assertEqual(expected_msg % uuidsentinel.vnf_instance_id,
                         resp.json['conflictingRequest']['message'])
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
@ddt.data(
{'filter': "(eq,vnfInstanceName,'dummy_name')"},
{'filter': "(in,vnfInstanceName,'dummy_name')"},
{'filter': "(cont,vnfInstanceName,'dummy_name')"},
{'filter': "(neq,vnfInstanceName,'dummy_name')"},
{'filter': "(nin,vnfInstanceName,'dummy_name')"},
{'filter': "(ncont,vnfInstanceName,'dummy_name')"},
{'filter': "(gt,vnfdVersion, 1)"},
{'filter': "(gte,vnfdVersion, 1)"},
{'filter': "(lt,vnfdVersion, 1)"},
{'filter': "(lte,vnfdVersion, 1)"},
)
def test_index_filter_operator(self, filter_params, mock_vnf_list):
"""Tests all supported operators in filter expression."""
query = urllib.parse.urlencode(filter_params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
vnf_instance_1 = fakes.return_vnf_instance()
vnf_instance_2 = fakes.return_vnf_instance(
fields.VnfInstanceState.INSTANTIATED)
mock_vnf_list.return_value = [vnf_instance_1, vnf_instance_2]
res_dict = self.controller.index(req)
expected_result = [fakes.fake_vnf_instance_response(),
fakes.fake_vnf_instance_response(
fields.VnfInstanceState.INSTANTIATED)]
self.assertEqual(expected_result, res_dict)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
def test_index_filter_combination(self, mock_vnf_list):
"""Test multiple filter parameters separated by semicolon."""
params = {
'filter': "(eq,vnfInstanceName,'dummy_name');"
"(eq,vnfInstanceDescription,'dummy_desc')"}
query = urllib.parse.urlencode(params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
vnf_instance_1 = fakes.return_vnf_instance()
vnf_instance_2 = fakes.return_vnf_instance(
fields.VnfInstanceState.INSTANTIATED)
mock_vnf_list.return_value = [vnf_instance_1, vnf_instance_2]
res_dict = self.controller.index(req)
expected_result = [fakes.fake_vnf_instance_response(),
fakes.fake_vnf_instance_response(
fields.VnfInstanceState.INSTANTIATED)]
self.assertEqual(expected_result, res_dict)
    @mock.patch.object(objects.VnfInstanceList, "get_by_filters")
    @ddt.data(
        {'filter': "(eq,vnfInstanceName,dummy_value)"},
        {'filter': "(eq,vnfInstanceDescription,dummy value)"},
        {'filter': "(eq,instantiationState,'NOT_INSTANTIATED')"},
        {'filter': "(eq,taskState,'ACTIVE')"},
        {'filter': "(eq,vnfdId,'dummy_vnfd_id')"},
        {'filter': "(eq,vnfProvider,'''dummy ''hi'' value''')"},
        {'filter': "(eq,vnfProductName,'dummy_product_name')"},
        {'filter': "(eq,vnfSoftwareVersion,'1.0')"},
        {'filter': "(eq,vnfdVersion,'dummy_vnfd_version')"},
        {'filter': "(eq,tenantId,'dummy_tenant_id')"},
        {'filter': "(eq,vnfPkgId,'dummy_pkg_id')"},
        {'filter': "(eq,vimConnectionInfo/accessInfo/region,'dummy_id')"},
        {'filter': "(eq,instantiatedInfo/flavourId,'dummy_flavour')"},
        {'filter': "(eq,instantiatedInfo/vnfInstanceId,'dummy_vnf_id')"},
        {'filter': "(eq,instantiatedInfo/vnfState,'ACTIVE')"},
        {'filter': "(eq,instantiatedInfo/instanceId,'dummy_vnf_id')"},
        {'filter':
            "(eq,instantiatedInfo/instantiationLevelId,'dummy_level_id')"},
        {'filter': "(eq,instantiatedInfo/extCpInfo/id,'dummy_id')"},
        {'filter': "(eq,instantiatedInfo/extVirtualLinkInfo/name,'dummy')"},
        {'filter':
            "(eq,instantiatedInfo/extManagedVirtualLinkInfo/id,'dummy_id')"},
        {'filter': "(eq,instantiatedInfo/vnfcResourceInfo/vduId,'dummy_id')"},
        {'filter':
            "(eq,instantiatedInfo/vnfVirtualLinkResourceInfo/"
            "vnfVirtualLinkDescId,'dummy_id')"},
        {'filter':
            "(eq,instantiatedInfo/virtualStorageResourceInfo/"
            "virtualStorageDescId,'dummy_id')"},
        {'filter': "(eq,instantiatedInfo/additionalParams/error,'dummy')"},
    )
    def test_index_filter_attributes(self, filter_params,
                                     mock_vnf_list):
        """Test various attributes supported for filter parameter.

        Each ddt entry filters on a different top-level or nested
        (slash-separated) attribute; all must be accepted and yield the
        full mocked listing.
        """
        query = urllib.parse.urlencode(filter_params)
        req = fake_request.HTTPRequest.blank(
            '/vnflcm/v1/vnf_instances?' + query)
        vnf_instance_1 = fakes.return_vnf_instance()
        vnf_instance_2 = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_list.return_value = [vnf_instance_1, vnf_instance_2]
        res_dict = self.controller.index(req)
        expected_result = [fakes.fake_vnf_instance_response(),
                           fakes.fake_vnf_instance_response(
                               fields.VnfInstanceState.INSTANTIATED)]
        self.assertEqual(expected_result, res_dict)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
@ddt.data(
{'filter': "(eq,vnfInstanceName,value"},
{'filter': "eq,vnfInstanceName,value)"},
{'filter': "(eq,vnfInstanceName,value);"},
{'filter': "(eq , vnfInstanceName ,value)"},
)
def test_index_filter_invalid_expression(self, filter_params,
mock_vnf_list):
"""Test invalid filter expression."""
query = urllib.parse.urlencode(filter_params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
self.assertRaises(exceptions.ValidationError,
self.controller.index, req)
    @mock.patch.object(objects.VnfInstanceList, "get_by_filters")
    @ddt.data(
        {'filter': "(eq,vnfInstanceName,singl'quote)"},
        {'filter': "(eq,vnfInstanceName,three''' quotes)"},
        {'filter': "(eq,vnfInstanceName,round ) bracket)"},
        {'filter': "(eq,vnfInstanceName,'dummy 'hi' value')"},
        {'filter': "(eq,vnfInstanceName,'dummy's value')"},
        {'filter': "(eq,vnfInstanceName,'three ''' quotes')"},
    )
    def test_index_filter_invalid_string_values(self, filter_params,
                                                mock_vnf_list):
        """Test invalid string values as per ETSI NFV SOL013 5.2.2."""
        # Unescaped quotes or brackets inside values must be rejected
        # with ValidationError.
        query = urllib.parse.urlencode(filter_params)
        req = fake_request.HTTPRequest.blank(
            '/vnflcm/v1/vnf_instances?' + query)
        self.assertRaises(exceptions.ValidationError,
                          self.controller.index, req)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
@ddt.data(
{'filter': '(eq,vnfdId,value1,value2)'},
{'filter': '(fake,vnfdId,dummy_vnfd_id)'},
{'filter': '(,vnfdId,dummy_vnfd_id)'},
)
def test_index_filter_invalid_operator(self, filter_params,
mock_vnf_list):
"""Test invalid operator in filter expression."""
query = urllib.parse.urlencode(filter_params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
self.assertRaises(exceptions.ValidationError,
self.controller.index, req)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
@ddt.data(
{'filter': '(eq,fakeattr,fakevalue)'},
{'filter': '(eq,,fakevalue)'},
)
def test_index_filter_invalid_attribute(self, filter_params,
mock_vnf_list):
"""Test invalid attribute in filter expression."""
query = urllib.parse.urlencode(filter_params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
self.assertRaises(exceptions.ValidationError,
self.controller.index, req)
@mock.patch.object(objects.VnfInstanceList, "get_by_filters")
@ddt.data(
{'filter': '(eq,data/size,fake_value)'},
{'filter': '(gt,data/createdAt,fake_value)'},
{'filter': '(eq,data/minDisk,fake_value)'},
{'filter': '(eq,data/minRam,fake_value)'},
)
def test_index_filter_invalid_value_type(self, filter_params,
mock_vnf_list):
"""Test values which doesn't match with attribute data type."""
query = urllib.parse.urlencode(filter_params)
req = fake_request.HTTPRequest.blank(
'/vnflcm/v1/vnf_instances?' + query)
self.assertRaises(exceptions.ValidationError,
self.controller.index, req)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
def test_show_lcm_op_occs(self, mock_get_by_id,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_lcm_op_occs/%s' % constants.UUID)
mock_get_by_id.return_value = fakes.return_vnf_lcm_opoccs_obj()
expected_result = fakes.VNFLCMOPOCC_RESPONSE
res_dict = self.controller.show_lcm_op_occs(req, constants.UUID)
self.assertEqual(expected_result, res_dict)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    def test_show_lcm_op_occs_not_found(self, mock_get_by_id,
                                        mock_get_service_plugins):
        """A missing LCM op occurrence yields 404.

        NOTE(review): the request path targets /vnfpkgm/v1/vnf_packages,
        not the vnf_lcm_op_occs resource - looks copy-pasted, so the 404
        may come from routing rather than the mocked NotFound; confirm.
        """
        req = fake_request.HTTPRequest.blank(
            '/vnfpkgm/v1/vnf_packages/%s' % constants.UUID)
        mock_get_by_id.side_effect = exceptions.NotFound()
        req.headers['Content-Type'] = 'application/json'
        req.method = 'GET'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update")
    def test_update_vnf(
            self,
            mock_update,
            mock_vnf_package_vnf_get_vnf_package_vnfd,
            mock_vnf_instance_list,
            mock_vnf_index_list,
            mock_get_service_plugins):
        """PATCH with a full modification body returns 202 and RPCs update."""
        mock_vnf_index_list.return_value = fakes._get_vnf()
        mock_vnf_instance_list.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_package_vnf_get_vnf_package_vnfd.return_value =\
            fakes.return_vnf_package_vnfd()
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                "vnfdId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {
                    "test": "test_value"
                },
                "vnfcInfoModificationsDeleteIds": ["test1"],
                "metadata": {"testkey": "test_value"},
                "vimConnectionInfo": {"id": "testid"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        # Call update (PATCH) API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_update.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                       test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VNF, "vnf_index_list")
    def test_update_vnf_none_vnf_data(
            self,
            mock_vnf_index_list,
            mock_get_service_plugins):
        """PATCH yields a 404 problem detail when no vnf row exists."""
        mock_vnf_index_list.return_value = None
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                "vnfdId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {
                    "test": "test_value"
                },
                "vnfcInfoModificationsDeleteIds": ["test1"],
                "metadata": {"testkey": "test_value"},
                "vimConnectionInfo": {"id": "testid"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        # Expected problem-detail body mirrors the controller's output.
        msg = _("Can not find requested vnf data: %s") % constants.UUID
        res = self._make_problem_detail(msg, 404, title='Not Found')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM':
test_nfvo_plugin.FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
def test_update_vnf_status_err(
self,
mock_vnf_index_list,
mock_get_service_plugins):
updates = {'status': 'ERROR'}
mock_vnf_index_list.return_value = fakes._get_vnf(**updates)
body = {"vnfInstanceName": "new_instance_name",
"vnfInstanceDescription": "new_instance_discription",
"vnfdId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
"vnfConfigurableProperties": {
"test": "test_value"
},
"vnfcInfoModificationsDeleteIds": ["test1"],
"metadata": {"testkey": "test_value"},
"vimConnectionInfo": {"id": "testid"}}
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % constants.UUID)
req.body = jsonutils.dump_as_bytes(body)
req.headers['Content-Type'] = 'application/json'
req.method = 'PATCH'
msg = _("VNF %(id)s status is %(state)s") % {
"id": constants.UUID, "state": "ERROR"}
res = self._make_problem_detail(msg %
{"state": "ERROR"}, 409, 'Conflict')
resp = req.get_response(self.app)
self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                                     test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    def test_update_vnf_none_instance_data(
            self,
            mock_vnf_instance_list,
            mock_vnf_index_list,
            mock_get_service_plugins):
        """PATCH update returns 404 when the VNF instance record is missing.

        The VNF row itself exists, but ``vnf_instance_list`` returns an
        empty value, so the controller must answer "Not Found".
        """
        mock_vnf_index_list.return_value = fakes._get_vnf()
        # Empty string stands in for "no instance data found".
        mock_vnf_instance_list.return_value = ""
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                "vnfdId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {
                    "test": "test_value"
                },
                "vnfcInfoModificationsDeleteIds": ["test1"],
                "metadata": {"testkey": "test_value"},
                "vimConnectionInfo": {"id": "testid"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        # Expected message references the vnfd_id of the fake VNF record.
        vnf_data = fakes._get_vnf()
        msg = ("Can not find requested vnf instance data: %s") % vnf_data.get(
            'vnfd_id')
        res = self._make_problem_detail(msg, 404, title='Not Found')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                                     test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
    @mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
                       "get_vnf_package_vnfd")
    @mock.patch.object(nfvo_client.VnfPackageRequest, "index")
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update")
    def test_update_vnf_none_vnfd(
            self,
            mock_update,
            mock_vnf_package_vnf_get_vnf_package_vnfd,
            mock_vnf_instance_list,
            mock_vnf_index_list,
            mock_index,
            mock_get_vnf_package_vnfd,
            mock_create_package,
            mock_get_service_plugins):
        """PATCH with vnfPkgId still succeeds when the local VNFD is absent.

        The local VNFD lookup raises VnfPackageVnfdNotFound, so the
        controller falls back to fetching package info from the NFVO
        (``mock_index``) and syncing it via ``create_package``; the
        request is then accepted (202) and the update RPC is sent.
        """
        mock_vnf_index_list.return_value = fakes._get_vnf()
        mock_vnf_instance_list.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_package_vnf_get_vnf_package_vnfd.return_value = ""
        # Local VNFD missing -> triggers the package-sync fallback path.
        mock_get_vnf_package_vnfd.side_effect =\
            exceptions.VnfPackageVnfdNotFound
        mock_create_package.return_value = fakes.return_vnf_package_vnfd()
        # Remote package index responds OK with a non-empty listing.
        mock_response = mock.MagicMock()
        mock_response.ok = True
        mock_response.json = mock.MagicMock()
        mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
        mock_index.return_value = mock_response
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                "vnfPkgId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {"test": "test_value"},
                "vnfcInfoModificationsDeleteIds": ["test1"]}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_update.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                                     test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update")
    def test_update_vnf_with_pkg_id(
            self, mock_update,
            mock_vnf_package_vnf_get_vnf_package_vnfd,
            mock_vnf_instance_list, mock_vnf_index_list,
            mock_get_service_plugins):
        """PATCH update by ``vnfPkgId`` (instead of ``vnfdId``) is accepted.

        The local VNFD lookup succeeds, so the request is accepted with
        202 and the update RPC is dispatched once.
        """
        mock_vnf_index_list.return_value = fakes._get_vnf()
        mock_vnf_instance_list.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_package_vnf_get_vnf_package_vnfd.return_value =\
            fakes.return_vnf_package_vnfd()
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                "vnfPkgId": "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {"test": "test_value"},
                "vnfcInfoModificationsDeleteIds": ["test1"]}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        # Call Update (PATCH) API
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_update.assert_called_once()
    @ddt.data("vnfdId", "vnfPkgId")
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                                     test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
    @mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
                       "get_vnf_package_vnfd")
    @mock.patch.object(nfvo_client.VnfPackageRequest, "index")
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update")
    def test_update_none_vnf_package_info(
            self, input_id,
            mock_update,
            mock_vnf_package_vnf_get_vnf_package_vnfd,
            mock_vnf_instance_list,
            mock_vnf_index_list,
            mock_index,
            mock_get_vnf_package_vnfd,
            mock_create_package,
            mock_get_service_plugins):
        """PATCH fails with 400 when the NFVO package query is not OK.

        Parameterized by ddt over both id fields ("vnfdId"/"vnfPkgId").
        The local VNFD is missing and the remote package index responds
        with ``ok = False``, so no package info can be obtained and the
        request must be rejected as a bad request.
        """
        mock_vnf_index_list.return_value = fakes._get_vnf()
        mock_vnf_instance_list.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_package_vnf_get_vnf_package_vnfd.return_value = ""
        mock_get_vnf_package_vnfd.side_effect =\
            exceptions.VnfPackageVnfdNotFound
        mock_create_package.return_value = fakes.return_vnf_package_vnfd()
        # Remote lookup fails: response.ok is False.
        mock_response = mock.MagicMock()
        mock_response.ok = False
        mock_response.json = mock.MagicMock()
        mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
        mock_index.return_value = mock_response
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                input_id: "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {"test": "test_value"},
                "vnfcInfoModificationsDeleteIds": ["test1"]}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    @ddt.data("vnfdId", "vnfPkgId")
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM':
                                     test_nfvo_plugin.FakeVNFMPlugin()})
    @mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
    @mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
                       "get_vnf_package_vnfd")
    @mock.patch.object(nfvo_client.VnfPackageRequest, "index")
    @mock.patch.object(objects.VNF, "vnf_index_list")
    @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
    @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update")
    def test_update_none_vnf_package_vnfd(
            self, input_id,
            mock_update,
            mock_vnf_package_vnf_get_vnf_package_vnfd,
            mock_vnf_instance_list,
            mock_vnf_index_list,
            mock_index,
            mock_get_vnf_package_vnfd,
            mock_create_package,
            mock_get_service_plugins):
        """PATCH fails with 500 when package sync yields no VNFD.

        Parameterized by ddt over both id fields. The remote index is
        OK, but both the local lookup and ``create_package`` return
        None, leaving the controller without a VNFD -> internal error.
        """
        mock_vnf_index_list.return_value = fakes._get_vnf()
        mock_vnf_instance_list.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED)
        mock_vnf_package_vnf_get_vnf_package_vnfd.return_value = ""
        # Both recovery paths produce nothing.
        mock_get_vnf_package_vnfd.return_value = None
        mock_create_package.return_value = None
        mock_response = mock.MagicMock()
        mock_response.ok = True
        mock_response.json = mock.MagicMock()
        mock_response.json.return_value = ['aaa', 'bbb', 'ccc']
        mock_index.return_value = mock_response
        body = {"vnfInstanceName": "new_instance_name",
                "vnfInstanceDescription": "new_instance_discription",
                input_id: "2c69a161-0000-4b0f-bcf8-391f8fc76600",
                "vnfConfigurableProperties": {"test": "test_value"},
                "vnfcInfoModificationsDeleteIds": ["test1"]}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s' % constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'PATCH'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_scale_not_scale_err(
            self,
            mock_vnf_instance_get_by_id,
            mock_get_service_plugins):
        """Scale request on a non-scalable VNF is rejected with 409.

        The mocked instance is INSTANTIATED but has no scale_status, so
        the controller answers with the 'NOT SCALE VNF' conflict body.
        """
        mock_vnf_instance_get_by_id.return_value =\
            fakes.return_vnf_instance(fields.VnfInstanceState.INSTANTIATED)
        body = {
            "type": "SCALE_OUT",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        res = self._make_problem_detail(
            'NOT SCALE VNF', 409, title='NOT SCALE VNF')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    def test_scale_not_active_err(self,
                                  mock_get_service_plugins):
        """Scale request against a non-ACTIVE VNF is rejected with 409.

        Uses a fixed UUID that the FakeVNFMPlugin maps to a VNF that is
        not in ACTIVE state -- presumably set up that way in the fake;
        verify against FakeVNFMPlugin if this changes.
        """
        body = {
            "type": "SCALE_OUT",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            '91e32c20-6d1f-47a4-9ba7-08f5e5effe07')
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        res = self._make_problem_detail(
            'VNF IS NOT ACTIVE', 409, title='VNF IS NOT ACTIVE')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    def test_scale_vnfnotfound_err(self,
                                   mock_get_service_plugins):
        """Scale request for an unknown VNF id returns 404.

        NOTE(review): ``msg`` intentionally keeps the raw ``%(vnf_id)s``
        placeholder unformatted -- the comparison below passes only if
        the controller emits the same unformatted text; confirm against
        the controller before changing this.
        """
        msg = _('VNF %(vnf_id)s could not be found')
        body = {
            "type": "SCALE_OUT",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b')
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        res = self._make_problem_detail(msg, 404, title='VNF NOT FOUND')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "create")
    @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification")
    def test_scale_in(
            self,
            mock_send_notification,
            mock_scale,
            mock_get_vnf,
            mock_vnf_instance_get_by_id,
            mock_obj_from_primitive,
            mock_create,
            mock_get_service_plugins):
        """SCALE_IN with one step on a scalable VNF is accepted (202).

        The scale RPC must be dispatched exactly once.
        """
        mock_get_vnf.return_value = fakes._get_vnf()
        # Instance must carry a scale_status to be considered scalable.
        mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status")
        mock_obj_from_primitive.return_value = fakes.scale_request_make(
            "SCALE_IN", 1)
        mock_create.return_value = 200
        body = {
            "type": "SCALE_IN",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_scale.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "create")
    @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification")
    def test_scale_out(
            self,
            mock_send_notification,
            mock_scale,
            mock_get_vnf,
            mock_vnf_instance_get_by_id,
            mock_obj_from_primitive,
            mock_create,
            mock_get_service_plugins):
        """SCALE_OUT with one step on a scalable VNF is accepted (202).

        Mirror of test_scale_in for the opposite direction; the scale
        RPC must be dispatched exactly once.
        """
        mock_get_vnf.return_value = fakes._get_vnf()
        mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status")
        mock_obj_from_primitive.return_value = fakes.scale_request_make(
            "SCALE_OUT", 1)
        mock_create.return_value = 200
        body = {
            "type": "SCALE_OUT",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_scale.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "create")
    @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale")
    def test_scale_in_err(
            self,
            mock_scale,
            mock_get_vnf,
            mock_vnf_instance_get_by_id,
            mock_obj_from_primitive,
            mock_create,
            mock_get_service_plugins):
        """SCALE_IN beyond the allowed number of steps is rejected (400).

        The parsed request carries 4 steps -- more than the aspect
        permits -- so the controller answers 'can not scale_in'.
        """
        mock_get_vnf.return_value = fakes._get_vnf()
        mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status")
        # 4 steps exceeds what the fake scale aspect allows.
        mock_obj_from_primitive.return_value = fakes.scale_request_make(
            "SCALE_IN", 4)
        mock_create.return_value = 200
        body = {
            "type": "SCALE_IN",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        res = self._make_problem_detail(
            'can not scale_in', 400, title='can not scale_in')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "create")
    @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale")
    def test_scale_out_err(
            self,
            mock_scale,
            mock_get_vnf,
            mock_vnf_instance_get_by_id,
            mock_obj_from_primitive,
            mock_create,
            mock_get_service_plugins):
        """SCALE_OUT beyond the allowed number of steps is rejected (400).

        Mirror of test_scale_in_err for the opposite direction.
        """
        mock_get_vnf.return_value = fakes._get_vnf()
        mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status")
        # 4 steps exceeds what the fake scale aspect allows.
        mock_obj_from_primitive.return_value = fakes.scale_request_make(
            "SCALE_OUT", 4)
        mock_create.return_value = 200
        body = {
            "type": "SCALE_OUT",
            "aspectId": "SP1",
            "numberOfSteps": 1,
            "additionalParams": {
                "test": "test_value"}}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' %
            constants.UUID)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        res = self._make_problem_detail(
            'can not scale_out', 400, title='can not scale_out')
        resp = req.get_response(self.app)
        self.assertEqual(res.text, resp.text)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification")
    @mock.patch.object(objects.VnfLcmOpOcc, "create")
    def test_scale_notification(
            self,
            mock_create,
            mock_send_notification,
            mock_vnf_instance,
            mock_get_vnf,
            mock_obj_from_primitive,
            get_service_plugins):
        """_scale() emits a START VnfLcmOperationOccurrenceNotification.

        Calls the controller's internal ``_scale`` directly (not via the
        WSGI app) and checks the key fields of the notification payload
        passed to ``send_notification``.
        """
        body = {"type": "SCALE_OUT", "aspect_id": "SP1"}
        req = fake_request.HTTPRequest.blank(
            '/vnf_instances/%s/scale' % uuidsentinel.vnf_instance_id)
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_obj = fakes.vnf_scale()
        mock_obj_from_primitive.return_value = fakes.scale_request_make(
            "SCALE_IN", 1)
        mock_get_vnf.return_value = vnf_obj
        # Build an instantiated instance with a concrete scale status
        # for aspect SP1 so _scale can resolve the current level.
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED,
            scale_status="scale_status")
        vnf_instance.instantiated_vnf_info.instance_id =\
            uuidsentinel.instance_id
        vnf_instance.instantiated_vnf_info.vnf_instance_id =\
            uuidsentinel.vnf_instance_id
        vnf_instance.instantiated_vnf_info.scale_status = []
        vnf_instance.instantiated_vnf_info.scale_status.append(
            objects.ScaleInfo(aspect_id='SP1', scale_level=0))
        mock_vnf_instance.return_value = vnf_instance
        # A second, plain instance object is what gets passed into
        # _scale; the prepared one above is served by the get_by_id mock.
        vnf_info = fakes._get_vnf()
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status")
        self.controller._scale(self.context,
                               vnf_info, vnf_instance, body)
        mock_send_notification.assert_called_once()
        # Second positional arg of send_notification is the payload dict.
        self.assertEqual(mock_send_notification.call_args[0][1].get(
            'notificationType'), 'VnfLcmOperationOccurrenceNotification')
        self.assertEqual(
            mock_send_notification.call_args[0][1].get('vnfInstanceId'),
            vnf_instance.instantiated_vnf_info.vnf_instance_id)
        self.assertEqual(mock_send_notification.call_args[0][1].get(
            'notificationStatus'), 'START')
        self.assertEqual(
            mock_send_notification.call_args[0][1].get('operation'),
            'SCALE')
        self.assertEqual(
            mock_send_notification.call_args[0][1].get('operationState'),
            'STARTING')
        self.assertEqual(mock_send_notification.call_args[0][1].get(
            'isAutomaticInvocation'), 'False')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "rollback")
    def test_rollback(
            self,
            mock_rollback,
            mock_vnf_instance,
            mock_get_vnf,
            mock_lcm_by_id,
            mock_get_service_plugins):
        """Rollback of a FAILED_TEMP LCM op occ is accepted (202).

        Uses the plain rollback fixture; the rollback RPC must be
        dispatched exactly once.
        """
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        vnf_obj = fakes.vnf_rollback()
        mock_get_vnf.return_value = vnf_obj
        # Instance left NOT_INSTANTIATED with task_state ERROR, i.e. a
        # failed operation eligible for rollback.
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.NOT_INSTANTIATED,
            task_state=fields.VnfInstanceTaskState.ERROR)
        mock_vnf_instance.return_value = vnf_instance
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_rollback.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "rollback")
    def test_rollback_2(
            self,
            mock_rollback,
            mock_vnf_instance,
            mock_get_vnf,
            mock_lcm_by_id,
            mock_get_service_plugins):
        """Rollback of a failed INSTANTIATE op occ is accepted (202).

        Same as test_rollback but with the instantiate-operation fixture
        (``vnflcm_rollback_insta``).
        """
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_insta()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        vnf_obj = fakes.vnf_rollback()
        mock_get_vnf.return_value = vnf_obj
        vnf_instance = fakes.return_vnf_instance(
            fields.VnfInstanceState.NOT_INSTANTIATED,
            task_state=fields.VnfInstanceTaskState.ERROR)
        mock_vnf_instance.return_value = vnf_instance
        resp = req.get_response(self.app)
        self.assertEqual(http_client.ACCEPTED, resp.status_code)
        mock_rollback.assert_called_once()
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    def test_rollback_vnf_lcm_op_occs_access_error(self,
                                                   mock_get_service_plugins):
        """Rollback yields 500 when the LCM op occ lookup itself errors.

        Nothing is mocked for VnfLcmOpOcc.get_by_id, so the real DB
        access fails inside the test environment and surfaces as an
        internal server error.
        """
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        resp = req.get_response(self.app)
        self.assertEqual(500, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    def test_rollback_lcm_not_found(self, mock_lcm_by_id,
                                    mock_get_service_plugins):
        """Rollback of an unknown LCM op occ id returns 404."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % constants.INVALID_UUID)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        # Lookup raises NotFound -> controller maps it to HTTP 404.
        mock_lcm_by_id.side_effect = exceptions.NotFound(resource='table',
                                                         name='vnf_lcm_op_occs')
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    def test_rollback_not_failed_temp(self, mock_lcm_by_id,
                                      mock_get_service_plugins):
        """Rollback of an op occ not in FAILED_TEMP state returns 409.

        The fixture represents an op occ that is still active, so the
        rollback precondition fails with Conflict.
        """
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_active()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfLcmOpOcc, "get_by_id",)
def test_rollback_not_ope(self, mock_lcm_by_id,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
vnf_lcm_op_occs = fakes.vnflcm_rollback_ope()
mock_lcm_by_id.return_value = vnf_lcm_op_occs
resp = req.get_response(self.app)
self.assertEqual(http_client.CONFLICT, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    def test_rollback_not_scale_in(self, mock_lcm_by_id,
                                   mock_get_service_plugins):
        """Rollback of a scale op occ that is not SCALE_IN returns 409."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_scale_in()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        resp = req.get_response(self.app)
        self.assertEqual(http_client.CONFLICT, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    def test_rollback_vnf_error(self, mock_lcm_by_id, mock_get_vnf,
                                mock_get_service_plugins):
        """An unexpected error while fetching the VNF yields 500."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_insta()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        # Generic exception -> mapped to internal server error.
        mock_get_vnf.side_effect = Exception("error")
        resp = req.get_response(self.app)
        self.assertEqual(500, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    def test_rollback_vnf_not_found(self, mock_get_vnf, mock_lcm_by_id,
                                    mock_get_service_plugins):
        """VNFNotFound while fetching the VNF maps to HTTP 404."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_insta()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        mock_get_vnf.side_effect = vnfm.VNFNotFound(
            vnf_id=uuidsentinel.vnf_instance_id)
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    def test_rollback_vnf_instance_error(self, mock_get_vnf, mock_lcm_by_id,
                                         mock_get_service_plugins):
        """Unmocked VnfInstance lookup makes rollback fail with 500.

        The VNF itself resolves, but VnfInstance.get_by_id is left
        unmocked so the instance fetch errors out in the test env.
        """
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_insta()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        vnf_obj = fakes.vnf_rollback()
        mock_get_vnf.return_value = vnf_obj
        resp = req.get_response(self.app)
        self.assertEqual(500, resp.status_code)
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(objects.VnfLcmOpOcc, "get_by_id")
    @mock.patch.object(controller.VnfLcmController, "_get_rollback_vnf")
    @mock.patch.object(objects.VnfInstance, "get_by_id")
    def test_rollback_vnf_instance_not_found(
            self, mock_vnf_instance, mock_get_vnf, mock_lcm_by_id,
            mock_get_service_plugins):
        """VNFNotFound on the instance lookup maps to HTTP 404."""
        req = fake_request.HTTPRequest.blank(
            '/vnf_lcm_op_occs/%s/rollback' % uuidsentinel.vnf_instance_id)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        vnf_lcm_op_occs = fakes.vnflcm_rollback_insta()
        mock_lcm_by_id.return_value = vnf_lcm_op_occs
        vnf_obj = fakes.vnf_rollback()
        mock_get_vnf.return_value = vnf_obj
        mock_vnf_instance.side_effect = vnfm.VNFNotFound(
            vnf_id=uuidsentinel.vnf_instance_id)
        resp = req.get_response(self.app)
        self.assertEqual(http_client.NOT_FOUND, resp.status_code)
| 44.433191
| 79
| 0.639405
| 13,939
| 125,035
| 5.392281
| 0.04204
| 0.046978
| 0.052486
| 0.037173
| 0.882455
| 0.861754
| 0.839974
| 0.827774
| 0.816519
| 0.806674
| 0
| 0.008782
| 0.251418
| 125,035
| 2,813
| 80
| 44.448987
| 0.79425
| 0.014404
| 0
| 0.76771
| 0
| 0
| 0.183675
| 0.065919
| 0
| 0
| 0
| 0
| 0.063015
| 1
| 0.044893
| false
| 0.000824
| 0.012356
| 0.002883
| 0.068781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d10d2e67694ba01822ecd0c7b9e0075e8e21630
| 118
|
py
|
Python
|
5 kyu/Program hangs.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
5 kyu/Program hangs.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
5 kyu/Program hangs.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
def wrap_mystery(n):
    """Trace *n*, short-circuit known hanging inputs, else defer to mystery().

    Inputs on which the underlying ``mystery`` is known to hang are
    answered with -1 directly; every other value is forwarded.
    """
    print(n)
    hang_inputs = {17, 27, 34, 43, 54, 68, 86, 108, 136, 172, 216, 272, 275}
    if n in hang_inputs:
        return -1
    return mystery(n)
| 39.333333
| 84
| 0.661017
| 27
| 118
| 2.851852
| 0.851852
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.33
| 0.152542
| 118
| 3
| 84
| 39.333333
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
7d459350b5aa195c79b3cd38fee6fea64c2a3a45
| 96
|
py
|
Python
|
pybana/translators/vega/__init__.py
|
optimdata/pybana
|
a88561199f25310b74c905b93238863e996f613e
|
[
"MIT"
] | 10
|
2019-10-31T08:47:27.000Z
|
2022-03-16T10:43:56.000Z
|
pybana/translators/vega/__init__.py
|
optimdata/pybana
|
a88561199f25310b74c905b93238863e996f613e
|
[
"MIT"
] | 2
|
2021-03-26T15:35:51.000Z
|
2021-12-17T14:56:46.000Z
|
pybana/translators/vega/__init__.py
|
optimdata/pybana
|
a88561199f25310b74c905b93238863e996f613e
|
[
"MIT"
] | 1
|
2020-08-25T08:28:34.000Z
|
2020-08-25T08:28:34.000Z
|
from .metrics import * # NOQA
from .vega import * # NOQA
from .visualization import * # NOQA
| 24
| 36
| 0.6875
| 12
| 96
| 5.5
| 0.5
| 0.454545
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 96
| 3
| 37
| 32
| 0.88
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bc2b79e05aa6a1f47ebd349438dbdf3c0f5e5f30
| 44,696
|
py
|
Python
|
neutron/tests/unit/conf/policies/test_qos.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/conf/policies/test_qos.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/conf/policies/test_qos.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_policy import policy as base_policy
from oslo_utils import uuidutils
from neutron import policy
from neutron.tests.unit.conf.policies import test_base as base
class QosPolicyAPITestCase(base.PolicyBaseTestCase):
    """Common fixture for QoS policy RBAC tests.

    Prepares an own-project target and an other-project (alt) target
    used by the persona-specific subclasses below.
    """

    def setUp(self):
        super().setUp()
        self.target = {'project_id': self.project_id}
        self.alt_target = {'project_id': self.alt_project_id}
class SystemAdminQosPolicyTests(QosPolicyAPITestCase):
    """System-admin persona: every QoS policy action is out of scope.

    System-scoped tokens must be rejected with InvalidScope for all
    CRUD actions, regardless of which project the target belongs to.
    """

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _assert_invalid_scope(self, action):
        # Both the own-project and the other-project target must raise.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.InvalidScope,
                policy.enforce, self.context, action, target)

    def test_get_policy(self):
        self._assert_invalid_scope('get_policy')

    def test_create_policy(self):
        self._assert_invalid_scope('create_policy')

    def test_update_policy(self):
        self._assert_invalid_scope('update_policy')

    def test_delete_policy(self):
        self._assert_invalid_scope('delete_policy')
class SystemMemberQosPolicyTests(SystemAdminQosPolicyTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosPolicyTests(SystemMemberQosPolicyTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosPolicyTests(QosPolicyAPITestCase):
    """Project admin: QoS policy actions allowed only in the own project."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def _assert_admin_allowed_own(self, action):
        # Allowed for the own-project target, rejected for the other
        # project's target.
        self.assertTrue(
            policy.enforce(self.context, action, self.target))
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce, self.context, action, self.alt_target)

    def test_get_policy(self):
        self._assert_admin_allowed_own('get_policy')

    def test_create_policy(self):
        self._assert_admin_allowed_own('create_policy')

    def test_update_policy(self):
        self._assert_admin_allowed_own('update_policy')

    def test_delete_policy(self):
        self._assert_admin_allowed_own('delete_policy')
class ProjectMemberQosPolicyTests(ProjectAdminQosPolicyTests):
    """Project member: read inherited from admin; writes are all denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def _assert_not_authorized_both(self, action):
        # Denied for the own-project and the other-project target alike.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                base_policy.PolicyNotAuthorized,
                policy.enforce, self.context, action, target)

    def test_create_policy(self):
        self._assert_not_authorized_both('create_policy')

    def test_update_policy(self):
        self._assert_not_authorized_both('update_policy')

    def test_delete_policy(self):
        self._assert_not_authorized_both('delete_policy')
class ProjectReaderQosPolicyTests(ProjectMemberQosPolicyTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
class QosRuleTypeAPITestCase(base.PolicyBaseTestCase):
    """Shared fixture for QoS rule-type policy tests (no target fields)."""

    def setUp(self):
        super().setUp()
        # Rule types are global resources; the target carries no project.
        self.target = {}
class SystemAdminQosRuleTypeTests(QosRuleTypeAPITestCase):
    """System admin may list/show QoS rule types."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def test_get_rule_type(self):
        enforced = policy.enforce(self.context, 'get_rule_type', self.target)
        self.assertTrue(enforced)
class SystemMemberQosRuleTypeTests(SystemAdminQosRuleTypeTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosRuleTypeTests(SystemMemberQosRuleTypeTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosRuleTypeTests(QosRuleTypeAPITestCase):
    """Project admin may list/show QoS rule types."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def test_get_rule_type(self):
        enforced = policy.enforce(self.context, 'get_rule_type', self.target)
        self.assertTrue(enforced)
class ProjectMemberQosRuleTypeTests(ProjectAdminQosRuleTypeTests):
    """Project member (and reader, below) may not read QoS rule types."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def test_get_rule_type(self):
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce, self.context, 'get_rule_type', self.target)
class ProjectReaderQosRuleTypeTests(ProjectMemberQosRuleTypeTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
class QosRulesAPITestCase(base.PolicyBaseTestCase):
    """Shared fixture for QoS rule policy tests.

    Builds a fake parent QoS policy and stubs the plugin directory so
    ``ext_parent_policy_id`` lookups resolve to it.
    """

    def setUp(self):
        super().setUp()
        policy_id = uuidutils.generate_uuid()
        self.qos_policy = {'id': policy_id, 'project_id': self.project_id}
        # Both targets reference the same parent policy; only the
        # project_id differs.
        self.target = {
            'project_id': self.project_id,
            'policy_id': policy_id,
            'ext_parent_policy_id': policy_id}
        self.alt_target = {
            'project_id': self.alt_project_id,
            'policy_id': policy_id,
            'ext_parent_policy_id': policy_id}
        self.plugin_mock = mock.Mock()
        self.plugin_mock.get_qos_policy.return_value = self.qos_policy
        # The patch is cleaned up by the base test class machinery;
        # start() without an explicit stop mirrors the original fixture.
        mock.patch(
            'neutron_lib.plugins.directory.get_plugin',
            return_value=self.plugin_mock).start()
class SystemAdminQosBandwidthLimitRuleTests(QosRulesAPITestCase):
    """System admin: bandwidth-limit rule actions are all out of scope."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _assert_invalid_scope(self, *actions):
        # For each action, the own-project target is checked before the
        # other-project target, matching the original explicit ordering.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.InvalidScope,
                    policy.enforce, self.context, action, target)

    def test_get_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('get_policy_bandwidth_limit_rule',
                                   'get_alias_bandwidth_limit_rule')

    def test_create_policy_bandwidth_limit_rule(self):
        self._assert_invalid_scope('create_policy_bandwidth_limit_rule')

    def test_update_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('update_policy_bandwidth_limit_rule',
                                   'update_alias_bandwidth_limit_rule')

    def test_delete_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('delete_policy_bandwidth_limit_rule',
                                   'delete_alias_bandwidth_limit_rule')
class SystemMemberQosBandwidthLimitRuleTests(
        SystemAdminQosBandwidthLimitRuleTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosBandwidthLimitRuleTests(
        SystemMemberQosBandwidthLimitRuleTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosBandwidthLimitRuleTests(QosRulesAPITestCase):
    """Project admin: bandwidth-limit rules only within the own project."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def _assert_project_scoped(self, *actions):
        # Each action (and alias) succeeds for the own-project target and
        # is rejected for the other project's target.
        for action in actions:
            self.assertTrue(
                policy.enforce(self.context, action, self.target))
            self.assertRaises(
                base_policy.PolicyNotAuthorized,
                policy.enforce, self.context, action, self.alt_target)

    def test_get_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_project_scoped('get_policy_bandwidth_limit_rule',
                                    'get_alias_bandwidth_limit_rule')

    def test_create_policy_bandwidth_limit_rule(self):
        self._assert_project_scoped('create_policy_bandwidth_limit_rule')

    def test_update_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_project_scoped('update_policy_bandwidth_limit_rule',
                                    'update_alias_bandwidth_limit_rule')

    def test_delete_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_project_scoped('delete_policy_bandwidth_limit_rule',
                                    'delete_alias_bandwidth_limit_rule')
class ProjectMemberQosBandwidthLimitRuleTests(
        ProjectAdminQosBandwidthLimitRuleTests):
    """Project member: read inherited from admin; writes are all denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def _assert_forbidden(self, *actions):
        # Denied for both the own-project and the other-project target.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.PolicyNotAuthorized,
                    policy.enforce, self.context, action, target)

    def test_create_policy_bandwidth_limit_rule(self):
        self._assert_forbidden('create_policy_bandwidth_limit_rule')

    def test_update_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_forbidden('update_policy_bandwidth_limit_rule',
                               'update_alias_bandwidth_limit_rule')

    def test_delete_policy_bandwidth_limit_rule(self):
        # And the same for aliases
        self._assert_forbidden('delete_policy_bandwidth_limit_rule',
                               'delete_alias_bandwidth_limit_rule')
class ProjectReaderQosBandwidthLimitRuleTests(
        ProjectMemberQosBandwidthLimitRuleTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
class SystemAdminQosDSCPMarkingRuleTests(QosRulesAPITestCase):
    """System admin: DSCP-marking rule actions are all out of scope."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _assert_invalid_scope(self, *actions):
        # Own-project target is checked before the other-project target,
        # matching the original explicit ordering.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.InvalidScope,
                    policy.enforce, self.context, action, target)

    def test_get_policy_dscp_marking_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('get_policy_dscp_marking_rule',
                                   'get_alias_dscp_marking_rule')

    def test_create_policy_dscp_marking_rule(self):
        self._assert_invalid_scope('create_policy_dscp_marking_rule')

    def test_update_policy_dscp_marking_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('update_policy_dscp_marking_rule',
                                   'update_alias_dscp_marking_rule')

    def test_delete_policy_dscp_marking_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('delete_policy_dscp_marking_rule',
                                   'delete_alias_dscp_marking_rule')
class SystemMemberQosDSCPMarkingRuleTests(SystemAdminQosDSCPMarkingRuleTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosDSCPMarkingRuleTests(SystemMemberQosDSCPMarkingRuleTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosDSCPMarkingRuleTests(QosRulesAPITestCase):
    """Project admin: DSCP-marking rules only within the own project.

    Each action (and its alias) is allowed for ``self.target`` and
    rejected with ``PolicyNotAuthorized`` for ``self.alt_target``.
    """

    def setUp(self):
        super(ProjectAdminQosDSCPMarkingRuleTests, self).setUp()
        self.context = self.project_admin_ctx

    def _assert_project_scoped(self, action):
        # Allowed on the own-project target, denied on the alternative
        # project's target.
        self.assertTrue(
            policy.enforce(self.context, action, self.target))
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce, self.context, action, self.alt_target)

    def test_get_policy_dscp_marking_rule(self):
        self._assert_project_scoped('get_policy_dscp_marking_rule')
        # And the same for aliases
        self._assert_project_scoped('get_alias_dscp_marking_rule')

    def test_create_policy_dscp_marking_rule(self):
        self._assert_project_scoped('create_policy_dscp_marking_rule')

    def test_update_policy_dscp_marking_rule(self):
        self._assert_project_scoped('update_policy_dscp_marking_rule')
        # And the same for aliases
        self._assert_project_scoped('update_alias_dscp_marking_rule')

    def test_delete_policy_dscp_marking_rule(self):
        self._assert_project_scoped('delete_policy_dscp_marking_rule')
        # And the same for aliases.  Bug fix: the original positive check
        # here enforced 'update_alias_dscp_marking_rule' (copy/paste), so
        # the delete alias was never asserted as allowed.
        self._assert_project_scoped('delete_alias_dscp_marking_rule')
class ProjectMemberQosDSCPMarkingRuleTests(
        ProjectAdminQosDSCPMarkingRuleTests):
    """Project member: read inherited from admin; writes are all denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def _assert_forbidden(self, *actions):
        # Denied for both the own-project and the other-project target.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.PolicyNotAuthorized,
                    policy.enforce, self.context, action, target)

    def test_create_policy_dscp_marking_rule(self):
        self._assert_forbidden('create_policy_dscp_marking_rule')

    def test_update_policy_dscp_marking_rule(self):
        # And the same for aliases
        self._assert_forbidden('update_policy_dscp_marking_rule',
                               'update_alias_dscp_marking_rule')

    def test_delete_policy_dscp_marking_rule(self):
        # And the same for aliases
        self._assert_forbidden('delete_policy_dscp_marking_rule',
                               'delete_alias_dscp_marking_rule')
class ProjectReaderQosDSCPMarkingRuleTests(
        ProjectMemberQosDSCPMarkingRuleTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
class SystemAdminQosMinimumBandwidthRuleTests(QosRulesAPITestCase):
    """System admin: minimum-bandwidth rule actions are all out of scope."""

    def setUp(self):
        super().setUp()
        self.context = self.system_admin_ctx

    def _assert_invalid_scope(self, *actions):
        # Own-project target is checked before the other-project target,
        # matching the original explicit ordering.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.InvalidScope,
                    policy.enforce, self.context, action, target)

    def test_get_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('get_policy_minimum_bandwidth_rule',
                                   'get_alias_minimum_bandwidth_rule')

    def test_create_policy_minimum_bandwidth_rule(self):
        self._assert_invalid_scope('create_policy_minimum_bandwidth_rule')

    def test_update_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('update_policy_minimum_bandwidth_rule',
                                   'update_alias_minimum_bandwidth_rule')

    def test_delete_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_invalid_scope('delete_policy_minimum_bandwidth_rule',
                                   'delete_alias_minimum_bandwidth_rule')
class SystemMemberQosMinimumBandwidthRuleTests(
        SystemAdminQosMinimumBandwidthRuleTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosMinimumBandwidthRuleTests(
        SystemMemberQosMinimumBandwidthRuleTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosMinimumBandwidthRuleTests(QosRulesAPITestCase):
    """Project admin: minimum-bandwidth rules only within the own project."""

    def setUp(self):
        super().setUp()
        self.context = self.project_admin_ctx

    def _assert_project_scoped(self, *actions):
        # Each action (and alias) succeeds for the own-project target and
        # is rejected for the other project's target.
        for action in actions:
            self.assertTrue(
                policy.enforce(self.context, action, self.target))
            self.assertRaises(
                base_policy.PolicyNotAuthorized,
                policy.enforce, self.context, action, self.alt_target)

    def test_get_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_project_scoped('get_policy_minimum_bandwidth_rule',
                                    'get_alias_minimum_bandwidth_rule')

    def test_create_policy_minimum_bandwidth_rule(self):
        self._assert_project_scoped('create_policy_minimum_bandwidth_rule')

    def test_update_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_project_scoped('update_policy_minimum_bandwidth_rule',
                                    'update_alias_minimum_bandwidth_rule')

    def test_delete_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_project_scoped('delete_policy_minimum_bandwidth_rule',
                                    'delete_alias_minimum_bandwidth_rule')
class ProjectMemberQosMinimumBandwidthRuleTests(
        ProjectAdminQosMinimumBandwidthRuleTests):
    """Project member: read inherited from admin; writes are all denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def _assert_forbidden(self, *actions):
        # Denied for both the own-project and the other-project target.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.PolicyNotAuthorized,
                    policy.enforce, self.context, action, target)

    def test_create_policy_minimum_bandwidth_rule(self):
        self._assert_forbidden('create_policy_minimum_bandwidth_rule')

    def test_update_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_forbidden('update_policy_minimum_bandwidth_rule',
                               'update_alias_minimum_bandwidth_rule')

    def test_delete_policy_minimum_bandwidth_rule(self):
        # And the same for aliases
        self._assert_forbidden('delete_policy_minimum_bandwidth_rule',
                               'delete_alias_minimum_bandwidth_rule')
class ProjectReaderQosMinimumBandwidthRuleTests(
        ProjectMemberQosMinimumBandwidthRuleTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
class SystemAdminQosMinimumPacketRateRuleTests(QosRulesAPITestCase):
    """System admin scope checks for minimum-packet-rate rules.

    The ``*_policy_minimum_packet_rate_rule`` policies are project scoped,
    so a system-scope context fails with ``InvalidScope``.  The
    ``*_alias_minimum_packet_rate_rule`` policies fail with
    ``PolicyNotAuthorized`` instead, as in the original assertions.
    """

    def setUp(self):
        super(SystemAdminQosMinimumPacketRateRuleTests, self).setUp()
        self.context = self.system_admin_ctx

    def _assert_raises_both_targets(self, exc, action):
        # The own-project target is checked before the other-project one.
        for target in (self.target, self.alt_target):
            self.assertRaises(
                exc, policy.enforce, self.context, action, target)

    def test_get_policy_minimum_packet_rate_rule(self):
        self._assert_raises_both_targets(
            base_policy.InvalidScope,
            'get_policy_minimum_packet_rate_rule')
        # And the same for aliases
        self._assert_raises_both_targets(
            base_policy.PolicyNotAuthorized,
            'get_alias_minimum_packet_rate_rule')

    def test_create_policy_minimum_packet_rate_rule(self):
        self._assert_raises_both_targets(
            base_policy.InvalidScope,
            'create_policy_minimum_packet_rate_rule')

    def test_update_policy_minimum_packet_rate_rule(self):
        self._assert_raises_both_targets(
            base_policy.InvalidScope,
            'update_policy_minimum_packet_rate_rule')
        # And the same for aliases.  Bug fix: the original enforced
        # 'get_alias_minimum_packet_rate_rule' here (copy/paste), leaving
        # the update alias untested; the update alias policy exists (it is
        # exercised by the project-admin tests below).
        self._assert_raises_both_targets(
            base_policy.PolicyNotAuthorized,
            'update_alias_minimum_packet_rate_rule')

    def test_delete_policy_minimum_packet_rate_rule(self):
        self._assert_raises_both_targets(
            base_policy.InvalidScope,
            'delete_policy_minimum_packet_rate_rule')
        # And the same for aliases
        self._assert_raises_both_targets(
            base_policy.PolicyNotAuthorized,
            'delete_alias_minimum_packet_rate_rule')
class SystemMemberQosMinimumPacketRateRuleTests(
        SystemAdminQosMinimumPacketRateRuleTests):
    """Same expectations as the system admin, run as a system member."""

    def setUp(self):
        super().setUp()
        self.context = self.system_member_ctx
class SystemReaderQosMinimumPacketRateRuleTests(
        SystemMemberQosMinimumPacketRateRuleTests):
    """Same expectations as the system member, run as a system reader."""

    def setUp(self):
        super().setUp()
        self.context = self.system_reader_ctx
class ProjectAdminQosMinimumPacketRateRuleTests(QosRulesAPITestCase):
    """Project admin: minimum-packet-rate rules only within own project."""

    def setUp(self):
        super(ProjectAdminQosMinimumPacketRateRuleTests, self).setUp()
        self.context = self.project_admin_ctx

    def _assert_project_scoped(self, action):
        # Allowed on the own-project target, denied on the alternative
        # project's target.
        self.assertTrue(
            policy.enforce(self.context, action, self.target))
        self.assertRaises(
            base_policy.PolicyNotAuthorized,
            policy.enforce, self.context, action, self.alt_target)

    def test_get_policy_minimum_packet_rate_rule(self):
        self._assert_project_scoped('get_policy_minimum_packet_rate_rule')
        # And the same for aliases
        self._assert_project_scoped('get_alias_minimum_packet_rate_rule')

    def test_create_policy_minimum_packet_rate_rule(self):
        self._assert_project_scoped('create_policy_minimum_packet_rate_rule')

    def test_update_policy_minimum_packet_rate_rule(self):
        self._assert_project_scoped('update_policy_minimum_packet_rate_rule')
        # And the same for aliases
        self._assert_project_scoped('update_alias_minimum_packet_rate_rule')

    def test_delete_policy_minimum_packet_rate_rule(self):
        self._assert_project_scoped('delete_policy_minimum_packet_rate_rule')
        # And the same for aliases.  Consistency fix: unlike every other
        # rule type, the original omitted the delete alias here even though
        # the 'delete_alias_minimum_packet_rate_rule' policy exists (the
        # project-member tests below assert it is denied).
        self._assert_project_scoped('delete_alias_minimum_packet_rate_rule')
class ProjectMemberQosMinimumPacketRateRuleTests(
        ProjectAdminQosMinimumPacketRateRuleTests):
    """Project member: read inherited from admin; writes are all denied."""

    def setUp(self):
        super().setUp()
        self.context = self.project_member_ctx

    def _assert_forbidden(self, *actions):
        # Denied for both the own-project and the other-project target.
        for action in actions:
            for target in (self.target, self.alt_target):
                self.assertRaises(
                    base_policy.PolicyNotAuthorized,
                    policy.enforce, self.context, action, target)

    def test_create_policy_minimum_packet_rate_rule(self):
        self._assert_forbidden('create_policy_minimum_packet_rate_rule')

    def test_update_policy_minimum_packet_rate_rule(self):
        # And the same for aliases
        self._assert_forbidden('update_policy_minimum_packet_rate_rule',
                               'update_alias_minimum_packet_rate_rule')

    def test_delete_policy_minimum_packet_rate_rule(self):
        # And the same for aliases
        self._assert_forbidden('delete_policy_minimum_packet_rate_rule',
                               'delete_alias_minimum_packet_rate_rule')
class ProjectReaderQosMinimumPacketRateRuleTests(
        ProjectMemberQosMinimumPacketRateRuleTests):
    """Same expectations as the project member, run as a project reader."""

    def setUp(self):
        super().setUp()
        self.context = self.project_reader_ctx
| 34.434515
| 79
| 0.637037
| 4,179
| 44,696
| 6.509931
| 0.04044
| 0.085315
| 0.109355
| 0.154383
| 0.82698
| 0.810807
| 0.805697
| 0.803749
| 0.801691
| 0.772248
| 0
| 0.000253
| 0.291771
| 44,696
| 1,297
| 80
| 34.461064
| 0.859169
| 0.029689
| 0
| 0.869482
| 0
| 0
| 0.127253
| 0.117305
| 0
| 0
| 0
| 0
| 0.167946
| 1
| 0.09309
| false
| 0
| 0.004798
| 0
| 0.135317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc32a7e51f79d36c91b7e4331c8c4a821c0dc9a4
| 14,634
|
py
|
Python
|
client/unified_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 6
|
2019-04-03T22:33:39.000Z
|
2021-05-07T12:00:00.000Z
|
client/unified_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 2
|
2019-04-03T22:57:41.000Z
|
2019-04-26T07:13:12.000Z
|
client/unified_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 1
|
2019-04-28T07:23:32.000Z
|
2019-04-28T07:23:32.000Z
|
import logging
import time
from config import config_trader, config_coin, api_keys
import ccxt
import ccxt.async
import asyncio
# Root logger setup; the client logs its trade progress at WARNING level.
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.WARNING)
# A unified client to link my program to ccxt
# Above this interface there should not be any market specific codes
# Exchanges whose create_order response does not carry the trade fills, so
# the filled amount must be recovered by re-fetching closed orders
# (see buy_coin / sell_coin below).
market_order_detail_type_1 = ['huobipro', 'okex']
class UnifiedClient:
    """Unified wrapper around a ccxt exchange client.

    No market-specific code should live above this layer.  Symbols are
    plain 'BASE/QUOTE' strings; amounts and prices cross the interface as
    '%.8f'-formatted strings.

    NOTE: the keyword flag formerly named ``async`` is now ``is_async`` --
    ``async`` has been a reserved keyword since Python 3.7 and made the
    original source a SyntaxError.  Positional callers are unaffected.
    """

    def __init__(self, market_name, is_async=False):
        """Create the underlying ccxt client for ``market_name``.

        :param market_name: ccxt exchange id, e.g. 'okex', 'poloniex'
        :param is_async: use the ccxt.async variant of the exchange class
        """
        # Resolve the exchange class by attribute lookup instead of eval():
        # identical behavior, no arbitrary-code-execution risk from the
        # market_name string.  ('async' must go through getattr because it
        # is a keyword and cannot appear after a dot.)
        module = getattr(ccxt, 'async') if is_async else ccxt
        exchange_cls = getattr(module, market_name)
        self.client = exchange_cls({
            'apiKey': api_keys.keys[market_name]['access'],
            'secret': api_keys.keys[market_name]['secret'],
            'nonce': ccxt.Exchange.microseconds,
            # 'verbose': True,
        })

    def get_tickers(self, is_async=False):
        """Return {symbol: last price as '%.8f' string} for all symbols."""
        if is_async:
            return self.get_tickers_async()
        tickers = self.client.fetch_tickers()
        return {symbol: '%.8f' % tickers[symbol]['last'] for symbol in tickers}

    async def get_tickers_async(self):
        """Async counterpart of :meth:`get_tickers`."""
        tickers = await self.client.fetch_tickers()
        return {symbol: '%.8f' % tickers[symbol]['last'] for symbol in tickers}

    @staticmethod
    def _filter_balances(balances, ignore_zero, limited):
        # Apply the ignore_zero / limited filters and format every balance
        # as a '%.8f' string.
        res = {}
        for currency in balances:
            if (not limited or currency in config_coin.currency_list['standard']) \
                    and (not ignore_zero or float(balances[currency]) > 0):
                res[currency] = '%.8f' % balances[currency]
        return res

    def get_balances(self, all_currency=True, ignore_zero=False, limited=True):
        """Fetch account balances as {currency: '%.8f' string}.

        :param all_currency: total balance if True, else free balance only
        :param ignore_zero: drop zero balances when True
        :param limited: restrict to config_coin.currency_list['standard']
        :raises BaseException: after 5 consecutive failed attempts
        """
        try_count = 0
        while try_count < 5:
            try:
                balances = (self.client.fetch_total_balance() if all_currency
                            else self.client.fetch_free_balance())
                return self._filter_balances(balances, ignore_zero, limited)
            except Exception as err:
                # BUG FIX: try_count was never incremented, so a persistent
                # API failure retried forever instead of the advertised 5
                # times.  Also narrowed from BaseException so Ctrl-C still
                # propagates.
                try_count += 1
                logging.warning('Error occurred in get_balances: %s. Try again.' % err)
        raise BaseException('Error in get_balances. Have tried 5 times.')

    async def get_balances_async(self, all_currency=True, ignore_zero=False, limited=True):
        """Async counterpart of :meth:`get_balances` (single attempt, no retry)."""
        balances = (await self.client.fetch_total_balance()) \
            if all_currency else (await self.client.fetch_free_balance())
        return self._filter_balances(balances, ignore_zero, limited)

    def get_balance(self, currency):
        """Return the total balance of one currency as a '%.8f' string."""
        return self.get_balances()[currency]

    def get_available_balances(self):
        """Return only the non-zero balances."""
        balances = self.get_balances()
        return {c: balances[c] for c in balances if float(balances[c]) != 0}

    def print_available_balances(self):
        """Log every non-zero balance (at WARNING, the configured level)."""
        balances = self.get_balances()
        for c in balances:
            if float(balances[c]) != 0:
                logging.warning('%5s %10s' % (c, balances[c]))

    @staticmethod
    def _walk_book(levels, size):
        # Walk order-book price levels (best first) until ``size`` is
        # filled.  Returns (filled_completely, '%.8f' average price,
        # consumed levels with the last one truncated to the residual).
        size = float(size)
        total = 0
        rest = size
        price_range = []
        for level in levels:
            price_s, volume_s = level
            price, volume = map(float, level)
            if volume > rest:
                total += price * rest
                price_range.append([price_s, rest])
                rest = 0
                break
            total += price * volume
            rest -= volume
            price_range.append(level)
        status = (rest <= 0)
        return status, '%.8f' % (total / size), price_range

    def cal_buy_price(self, currency_pair, size, is_async=False):
        """Average ask-side fill price for buying ``size`` of the base coin.

        :return: (market_big_enough, '%.8f' avg price, consumed ask levels)
        """
        if is_async:
            return self.cal_buy_price_async(currency_pair, size)
        asks = self.client.fetch_order_book(currency_pair)['asks']
        # logging.warning(asks)
        return self._walk_book(asks, size)

    async def cal_buy_price_async(self, currency_pair, size):
        """Async counterpart of :meth:`cal_buy_price`."""
        book = await self.client.fetch_order_book(currency_pair)
        return self._walk_book(book['asks'], size)

    def cal_sell_price(self, currency_pair, size, is_async=False):
        """Average bid-side fill price for selling ``size`` of the base coin.

        :return: (market_big_enough, '%.8f' avg price, consumed bid levels)
        """
        if is_async:
            return self.cal_sell_price_async(currency_pair, size)
        bids = self.client.fetch_order_book(currency_pair)['bids']
        # logging.warning(bids)
        return self._walk_book(bids, size)

    async def cal_sell_price_async(self, currency_pair, size):
        """Async counterpart of :meth:`cal_sell_price`."""
        book = await self.client.fetch_order_book(currency_pair)
        return self._walk_book(book['bids'], size)

    def _filled_amount(self, currency_pair, order_detail):
        # Exchanges in market_order_detail_type_1 do not report fills in
        # the create_order response; re-fetch closed orders to recover the
        # filled amount.  Others (poloniex, etc.) carry it in 'trades'.
        if self.client.id in market_order_detail_type_1:
            time.sleep(1.0)
            deal_amount = '0.0'
            orders = self.client.fetch_orders(symbol=currency_pair, params={'status': 'closed'})
            for record in orders:
                if str(record['id']) == order_detail['id']:
                    deal_amount = str(record['filled'])
                    break
        else:  # poloniex, etc.
            deal_amount = str(sum([float(trade['amount']) for trade in order_detail['trades']]))
        return deal_amount

    def buy_coin(self, base_currency, quote_currency, amount):
        """
        Buy coin according to the market.
        :param base_currency: string, uppercase
        :param quote_currency: string, uppercase
        :param amount: string
        :return: order detail dict on success, else None
        """
        currency_pair = base_currency + '/' + quote_currency
        balance_quote_currency = self.get_balance(quote_currency)
        # reserve enough market space: probe the book at twice the amount
        market_available, avg_price, price_range = self.cal_buy_price(currency_pair, float(amount) * 2)
        # buy price: 8 % above the worst needed ask so the limit order fills
        # (original comment said 5 %, the code has always used 1.08)
        price = '%.8f' % (float(price_range[-1][0]) * 1.08)
        # check balance of quote currency (1 % head-room)
        if float(balance_quote_currency) > float(price)*float(amount)*1.01:
            # check market status (if it is big enough for this order)
            if market_available:
                logging.warning('Buying %s: amount = %s, price = %s, total = %.6f' %
                                (base_currency, amount, price, float(price)*float(amount)))
                order_detail = self.client.create_order(currency_pair, 'limit', 'buy', float(amount), price=price)
                # print(order_detail)
                deal_amount = self._filled_amount(currency_pair, order_detail)
                logging.warning(order_detail)
                logging.warning('Traded amount: %s / %s (%.0f%%)' %
                                (deal_amount, amount, 100*float(deal_amount)/float(amount)))
                if abs(float(deal_amount)-float(amount))/float(amount) < 0.01:  # more than 99% of orders completed
                    logging.warning('Order successfully traded!')
                else:
                    logging.warning('Order is good but trade failed! Please handle exceptions manually.')
                return order_detail
            else:
                logging.warning('Market not enough for buying %s of amount %s.' % (base_currency, amount))
        else:
            logging.warning('Not enough balance available to buy %s of amount %s.' % (base_currency, amount))
        logging.warning('Order failed. Please handle exceptions manually.')

    def sell_coin(self, base_currency, quote_currency, amount):
        """
        Sell coin according to the market.
        :param base_currency: string, uppercase
        :param quote_currency: string, uppercase
        :param amount: string
        :return: order detail dict on success, else None
        """
        currency_pair = base_currency + '/' + quote_currency
        balance_base_currency = self.get_balance(base_currency)
        # reserve enough market space: probe the book at twice the amount
        market_available, avg_price, price_range = self.cal_sell_price(currency_pair, float(amount)*2)
        # sell price, lower 8 % limit to ensure trade success
        price = '%.8f' % (float(price_range[-1][0]) * 0.92)
        # check balance of base currency
        if float(balance_base_currency) > float(amount):
            # check market status (if it is big enough for this order)
            if market_available:
                logging.warning('Selling %s: amount = %s, price = %s' % (base_currency, amount, price))
                order_detail = self.client.create_order(currency_pair, 'limit', 'sell', float(amount), price=price)
                # print(order_detail)
                deal_amount = self._filled_amount(currency_pair, order_detail)
                logging.warning(order_detail)
                logging.warning('Traded amount: %s / %s (%.0f%%)' %
                                (deal_amount, amount, 100*float(deal_amount)/float(amount)))
                if abs(float(deal_amount)-float(amount))/float(amount) < 0.01:  # more than 99% of orders completed
                    logging.warning('Order successfully traded!')
                else:
                    logging.warning('Order is good but trade failed! Please handle exceptions manually.')
                return order_detail
            else:
                logging.warning('Market not enough for selling %s of amount %s.' % (base_currency, amount))
        else:
            logging.warning('Not enough %s balance for selling (selling amount: %s).' % (base_currency, amount))
        logging.warning('Order failed. Please handle exceptions manually.')

    def get_ticker_with_size(self, base_currency, quote_currency, base_currency_trade_size=None, is_async=False):
        """Depth-aware ticker: average 'ask'/'bid' fill prices for the
        configured (or given) trade size, as '%.8f' strings."""
        if is_async:
            return self.get_ticker_with_size_async(base_currency, quote_currency, base_currency_trade_size)
        if not base_currency_trade_size:
            base_currency_trade_size = config_trader.trade_size[base_currency]
        currency_pair = base_currency + '/' + quote_currency
        ticker = {}
        ticker['ask'] = self.cal_buy_price(currency_pair=currency_pair,
                                           size=base_currency_trade_size)[1]
        ticker['bid'] = self.cal_sell_price(currency_pair=currency_pair,
                                            size=base_currency_trade_size)[1]
        return ticker

    async def get_ticker_with_size_async(self, base_currency, quote_currency, base_currency_trade_size=None):
        """Async counterpart of :meth:`get_ticker_with_size`."""
        if not base_currency_trade_size:
            base_currency_trade_size = config_trader.trade_size[base_currency]
        currency_pair = base_currency + '/' + quote_currency
        ticker = {}
        ticker['ask'] = (await self.cal_buy_price_async(currency_pair=currency_pair,
                                                        size=base_currency_trade_size))[1]
        ticker['bid'] = (await self.cal_sell_price_async(currency_pair=currency_pair,
                                                         size=base_currency_trade_size))[1]
        return ticker
if __name__ == '__main__':
    # Manual smoke test: constructs a live okex client (requires valid API
    # keys in api_keys) and prints the account balances.  The commented
    # lines below exercise the remaining endpoints, including the
    # asyncio-driven variants at the bottom.
    trader = UnifiedClient('okex')
    # print(trader.client.urls['api'])
    # print(trader.get_tickers())
    print(trader.get_balances())
    # print(trader.get_balance('LTC'))
    # print(trader.get_available_balances())
    # print(trader.print_available_balances())
    # print(trader.cal_buy_price('LTC/USDT', size='0.01'))
    # print(trader.client.fetch_orders(symbol='XRP/USDT', params={'status': 'closed'}))
    # print(trader.buy_coin('LTC', 'USDT', '0.01'))
    # print(trader.sell_coin('LTC', 'USDT', '0.01'))
    # print(trader.get_ticker_with_size('XRP', 'USDT'))
    # print(trader.get_balances())
    # trader = UnifiedClient('poloniex', async=True)
    # print(trader.client.urls['api'])
    # print(trader.get_balances())
    # print(trader.get_balance('LTC'))
    # print(trader.get_available_balances())
    # print(trader.print_available_balances())
    # print(trader.cal_buy_price('LTC/USDT', size='0.01'))
    # print(trader.client.fetch_orders(symbol='XRP/USDT', params={'status': 'closed'}))
    # print(trader.buy_coin('LTC', 'USDT', '0.01'))
    # print(trader.sell_coin('LTC', 'USDT', '0.01'))
    # print(trader.get_ticker_with_size('XRP', 'USDT'))
    # print(trader.get_balances())
    # trader = UnifiedClient('poloniex', async=True)
    # print(trader.client.urls['api'])
    # tasks = []
    # task = asyncio.ensure_future(trader.get_tickers(async=True))
    # tasks.append(task)
    # task = asyncio.ensure_future(trader.get_ticker_with_size('XRP', 'USDT', async=True))
    # tasks.append(task)
    # loop = asyncio.get_event_loop()
    # done, pending = loop.run_until_complete(asyncio.wait(tasks))
    # results = [future.result() for future in done]
    # print(results)
    # loop.close()
    pass
| 41.811429
| 115
| 0.584256
| 1,714
| 14,634
| 4.789965
| 0.124271
| 0.049695
| 0.021924
| 0.028136
| 0.813276
| 0.788794
| 0.77162
| 0.745432
| 0.705968
| 0.661876
| 0
| 0.009919
| 0.304223
| 14,634
| 349
| 116
| 41.931232
| 0.796405
| 0.146371
| 0
| 0.669456
| 0
| 0
| 0.084512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004184
| 0.025105
| null | null | 0.008368
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70b536bd5a5648c2ef927df2b5ef56a8a67ef3e6
| 9,002
|
py
|
Python
|
netbox_dns/tests/test_api.py
|
hbasria/netbox-dns
|
ca6c715eee0e5a6b519906fae88c5d476c356a2d
|
[
"MIT"
] | null | null | null |
netbox_dns/tests/test_api.py
|
hbasria/netbox-dns
|
ca6c715eee0e5a6b519906fae88c5d476c356a2d
|
[
"MIT"
] | null | null | null |
netbox_dns/tests/test_api.py
|
hbasria/netbox-dns
|
ca6c715eee0e5a6b519906fae88c5d476c356a2d
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from utilities.testing import APITestCase
from netbox_dns.models import NameServer, Record, Zone
class ZoneAPITestCase(APITestCase):
    """Exercise the Zone REST API (format=json): list, detail, create, delete,
    each with and without the relevant permission."""

    def test_view_zone_without_permission(self):
        # Listing zones without the view permission must be refused.
        list_url = reverse("plugins-api:netbox_dns-api:zone-list")
        resp = self.client.get(f"{list_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 403)

    def test_view_zone_with_permission(self):
        self.add_permissions("netbox_dns.view_zone")
        list_url = reverse("plugins-api:netbox_dns-api:zone-list")
        resp = self.client.get(f"{list_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 200)

    def test_view_zone_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_zone")
        zone = Zone.objects.create(name="asdf")
        detail_url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        resp = self.client.get(f"{detail_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 200)

    def test_add_zone_with_permission(self):
        self.add_permissions("netbox_dns.add_zone")
        list_url = reverse("plugins-api:netbox_dns-api:zone-list")
        resp = self.client.post(f"{list_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 201)

    def test_add_zone_without_permission(self):
        list_url = reverse("plugins-api:netbox_dns-api:zone-list")
        resp = self.client.post(f"{list_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 403)

    def test_delete_zone_with_permission(self):
        self.add_permissions("netbox_dns.delete_zone")
        zone = Zone.objects.create(name="asdf")
        detail_url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        resp = self.client.delete(f"{detail_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 204)

    def test_delete_zone_without_permission(self):
        zone = Zone.objects.create(name="asdf")
        detail_url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        resp = self.client.delete(f"{detail_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 403)
class NameServerAPITestCase(APITestCase):
    """Exercise the NameServer REST API (format=json): list, detail, create,
    delete, each with and without the relevant permission."""

    def test_list_nameserver_without_permission(self):
        # Listing nameservers without the view permission must be refused.
        list_url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        resp = self.client.get(f"{list_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 403)

    def test_list_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.view_nameserver")
        list_url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        resp = self.client.get(f"{list_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 200)

    def test_view_nameserver_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_nameserver")
        nameserver = NameServer.objects.create(name="asdf")
        detail_url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        resp = self.client.get(f"{detail_url}?format=json", **self.header)
        self.assertEqual(resp.status_code, 200)

    def test_add_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.add_nameserver")
        list_url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        resp = self.client.post(f"{list_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 201)

    def test_add_nameserver_without_permission(self):
        list_url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        resp = self.client.post(f"{list_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 403)

    def test_delete_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.delete_nameserver")
        nameserver = NameServer.objects.create(name="asdf")
        detail_url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        resp = self.client.delete(f"{detail_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 204)

    def test_delete_nameserver_without_permission(self):
        nameserver = NameServer.objects.create(name="asdf")
        detail_url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        resp = self.client.delete(f"{detail_url}?format=json", {"name": "Name 1"}, **self.header)
        self.assertEqual(resp.status_code, 403)
class RecordAPITestCase(APITestCase):
    """
    Tests for Record API (format=json)

    Covers list, detail, create and delete, each with and without the
    relevant permission.
    """

    @staticmethod
    def _create_record():
        # Fixture: a Zone plus one A record to run detail/delete tests on.
        zone = Zone.objects.create(name="zone.com")
        return Record.objects.create(
            zone=zone,
            type=Record.A,
            name="Record 1",
            value="Value 1",
            ttl=100,
        )

    def test_view_record_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:record-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)

    def test_view_record_with_permission(self):
        self.add_permissions("netbox_dns.view_record")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)

    def test_view_record_detail_without_permission(self):
        record = self._create_record()
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)

    def test_view_record_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_record")
        record = self._create_record()
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)

    def test_add_record_with_permission(self):
        self.add_permissions("netbox_dns.add_record")
        zone = Zone.objects.create(name="zone.com")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        data = {
            "zone": zone.id,
            "type": Record.A,
            "name": "Record 1",
            "value": "Value 1",
            "ttl": 100,
        }
        response = self.client.post(f"{url}?format=json", data, **self.header)
        self.assertEqual(response.status_code, 201)

    def test_add_record_without_permission(self):
        # BUG FIX: was misnamed test_add_zone_without_permission (copy-paste
        # from ZoneAPITestCase) although it posts to the record endpoint.
        zone = Zone.objects.create(name="zone.com")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        data = {
            "zone": zone.id,
            "type": Record.A,
            "name": "Record 1",
            "value": "Value 1",
            "ttl": 100,
        }
        response = self.client.post(f"{url}?format=json", data, **self.header)
        self.assertEqual(response.status_code, 403)

    def test_delete_record_with_permission(self):
        self.add_permissions("netbox_dns.delete_record")
        record = self._create_record()
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 204)

    def test_delete_record_without_permission(self):
        # BUG FIX: was misnamed test_delete_zone_without_permission although
        # it deletes via the record endpoint.
        record = self._create_record()
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
| 36.445344
| 88
| 0.620418
| 1,085
| 9,002
| 4.988018
| 0.059908
| 0.058204
| 0.069106
| 0.081301
| 0.944013
| 0.938101
| 0.929231
| 0.929231
| 0.928123
| 0.863267
| 0
| 0.015529
| 0.241724
| 9,002
| 246
| 89
| 36.593496
| 0.777322
| 0.011775
| 0
| 0.726804
| 0
| 0
| 0.208023
| 0.122712
| 0
| 0
| 0
| 0
| 0.113402
| 1
| 0.113402
| false
| 0
| 0.015464
| 0
| 0.14433
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb8bef3603989b8a51a50a8fb96fb579c434e068
| 22,959
|
py
|
Python
|
custos-client-sdks/custos-python-sdk/custos/server/core/ResourceSecretService_pb2_grpc.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
custos-client-sdks/custos-python-sdk/custos/server/core/ResourceSecretService_pb2_grpc.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
custos-client-sdks/custos-python-sdk/custos/server/core/ResourceSecretService_pb2_grpc.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import ResourceSecretService_pb2 as ResourceSecretService__pb2
class ResourceSecretServiceStub(object):
    """Client-side stub for the ResourceSecretService gRPC service.

    Generated by the gRPC Python protocol compiler (do not edit by hand).
    Each attribute assigned in ``__init__`` is a callable unary-unary RPC
    bound to the supplied channel.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # --- secret metadata lookups ---
        self.getSecret = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getSecret',
                request_serializer=ResourceSecretService__pb2.GetSecretRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.SecretMetadata.FromString,
                )
        self.getResourceCredentialSummary = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getResourceCredentialSummary',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.SecretMetadata.FromString,
                )
        self.getAllResourceCredentialSummaries = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getAllResourceCredentialSummaries',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialSummariesRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.ResourceCredentialSummaries.FromString,
                )
        # --- credential creation ---
        self.addSSHCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/addSSHCredential',
                request_serializer=ResourceSecretService__pb2.SSHCredential.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
                )
        self.addPasswordCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/addPasswordCredential',
                request_serializer=ResourceSecretService__pb2.PasswordCredential.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
                )
        self.addCertificateCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/addCertificateCredential',
                request_serializer=ResourceSecretService__pb2.CertificateCredential.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
                )
        # --- credential retrieval by token ---
        self.getSSHCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getSSHCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.SSHCredential.FromString,
                )
        self.getPasswordCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getPasswordCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.PasswordCredential.FromString,
                )
        self.getCertificateCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/getCertificateCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.CertificateCredential.FromString,
                )
        # --- credential deletion by token ---
        self.deleteSSHCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/deleteSSHCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
                )
        self.deletePWDCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/deletePWDCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
                )
        self.deleteCertificateCredential = channel.unary_unary(
                '/org.apache.custos.resource.secret.service.ResourceSecretService/deleteCertificateCredential',
                request_serializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
                response_deserializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
                )
class ResourceSecretServiceServicer(object):
    """Server-side interface for ResourceSecretService.

    Generated by the gRPC Python protocol compiler (do not edit by hand).
    Subclass and override the handlers you implement; every method below
    defaults to answering UNIMPLEMENTED.
    """

    def getSecret(self, request, context):
        """getSecret RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getResourceCredentialSummary(self, request, context):
        """getResourceCredentialSummary RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getAllResourceCredentialSummaries(self, request, context):
        """getAllResourceCredentialSummaries RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def addSSHCredential(self, request, context):
        """addSSHCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def addPasswordCredential(self, request, context):
        """addPasswordCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def addCertificateCredential(self, request, context):
        """addCertificateCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getSSHCredential(self, request, context):
        """getSSHCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getPasswordCredential(self, request, context):
        """getPasswordCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getCertificateCredential(self, request, context):
        """getCertificateCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteSSHCredential(self, request, context):
        """deleteSSHCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deletePWDCredential(self, request, context):
        """deletePWDCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteCertificateCredential(self, request, context):
        """deleteCertificateCredential RPC handler; override in a subclass (defaults to UNIMPLEMENTED)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ResourceSecretServiceServicer_to_server(servicer, server):
    """Register every RPC handler of ``servicer`` on ``server`` under the
    fully qualified service name
    'org.apache.custos.resource.secret.service.ResourceSecretService'.

    Generated by the gRPC Python protocol compiler (do not edit by hand).
    """
    rpc_method_handlers = {
            'getSecret': grpc.unary_unary_rpc_method_handler(
                    servicer.getSecret,
                    request_deserializer=ResourceSecretService__pb2.GetSecretRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.SecretMetadata.SerializeToString,
            ),
            'getResourceCredentialSummary': grpc.unary_unary_rpc_method_handler(
                    servicer.getResourceCredentialSummary,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.SecretMetadata.SerializeToString,
            ),
            'getAllResourceCredentialSummaries': grpc.unary_unary_rpc_method_handler(
                    servicer.getAllResourceCredentialSummaries,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialSummariesRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.ResourceCredentialSummaries.SerializeToString,
            ),
            'addSSHCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.addSSHCredential,
                    request_deserializer=ResourceSecretService__pb2.SSHCredential.FromString,
                    response_serializer=ResourceSecretService__pb2.AddResourceCredentialResponse.SerializeToString,
            ),
            'addPasswordCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.addPasswordCredential,
                    request_deserializer=ResourceSecretService__pb2.PasswordCredential.FromString,
                    response_serializer=ResourceSecretService__pb2.AddResourceCredentialResponse.SerializeToString,
            ),
            'addCertificateCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.addCertificateCredential,
                    request_deserializer=ResourceSecretService__pb2.CertificateCredential.FromString,
                    response_serializer=ResourceSecretService__pb2.AddResourceCredentialResponse.SerializeToString,
            ),
            'getSSHCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.getSSHCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.SSHCredential.SerializeToString,
            ),
            'getPasswordCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.getPasswordCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.PasswordCredential.SerializeToString,
            ),
            'getCertificateCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.getCertificateCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.CertificateCredential.SerializeToString,
            ),
            'deleteSSHCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteSSHCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.SerializeToString,
            ),
            'deletePWDCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.deletePWDCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.SerializeToString,
            ),
            'deleteCertificateCredential': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteCertificateCredential,
                    request_deserializer=ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.FromString,
                    response_serializer=ResourceSecretService__pb2.ResourceCredentialOperationStatus.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'org.apache.custos.resource.secret.service.ResourceSecretService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ResourceSecretService(object):
    """Static, channel-less client stubs for the Custos ResourceSecretService.

    Each method wraps ``grpc.experimental.unary_unary`` so a single RPC can be
    issued directly against ``target`` without first building a channel and a
    stub object.  All twelve methods share the same signature; only the RPC
    path and the request/response protobuf types differ.

    NOTE: this is gRPC *generated* code (experimental API) — do not edit the
    call bodies by hand; regenerate from the .proto instead.
    """
    @staticmethod
    def getSecret(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getSecret RPC: GetSecretRequest -> SecretMetadata."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getSecret',
            ResourceSecretService__pb2.GetSecretRequest.SerializeToString,
            ResourceSecretService__pb2.SecretMetadata.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def getResourceCredentialSummary(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getResourceCredentialSummary RPC: GetResourceCredentialByTokenRequest -> SecretMetadata."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getResourceCredentialSummary',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.SecretMetadata.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def getAllResourceCredentialSummaries(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getAllResourceCredentialSummaries RPC: GetResourceCredentialSummariesRequest -> ResourceCredentialSummaries."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getAllResourceCredentialSummaries',
            ResourceSecretService__pb2.GetResourceCredentialSummariesRequest.SerializeToString,
            ResourceSecretService__pb2.ResourceCredentialSummaries.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def addSSHCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot addSSHCredential RPC: SSHCredential -> AddResourceCredentialResponse."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/addSSHCredential',
            ResourceSecretService__pb2.SSHCredential.SerializeToString,
            ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def addPasswordCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot addPasswordCredential RPC: PasswordCredential -> AddResourceCredentialResponse."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/addPasswordCredential',
            ResourceSecretService__pb2.PasswordCredential.SerializeToString,
            ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def addCertificateCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot addCertificateCredential RPC: CertificateCredential -> AddResourceCredentialResponse."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/addCertificateCredential',
            ResourceSecretService__pb2.CertificateCredential.SerializeToString,
            ResourceSecretService__pb2.AddResourceCredentialResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def getSSHCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getSSHCredential RPC: GetResourceCredentialByTokenRequest -> SSHCredential."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getSSHCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.SSHCredential.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def getPasswordCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getPasswordCredential RPC: GetResourceCredentialByTokenRequest -> PasswordCredential."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getPasswordCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.PasswordCredential.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def getCertificateCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot getCertificateCredential RPC: GetResourceCredentialByTokenRequest -> CertificateCredential."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/getCertificateCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.CertificateCredential.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def deleteSSHCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot deleteSSHCredential RPC: GetResourceCredentialByTokenRequest -> ResourceCredentialOperationStatus."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/deleteSSHCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def deletePWDCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot deletePWDCredential RPC: GetResourceCredentialByTokenRequest -> ResourceCredentialOperationStatus."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/deletePWDCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def deleteCertificateCredential(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot deleteCertificateCredential RPC: GetResourceCredentialByTokenRequest -> ResourceCredentialOperationStatus."""
        return grpc.experimental.unary_unary(request, target, '/org.apache.custos.resource.secret.service.ResourceSecretService/deleteCertificateCredential',
            ResourceSecretService__pb2.GetResourceCredentialByTokenRequest.SerializeToString,
            ResourceSecretService__pb2.ResourceCredentialOperationStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 53.393023
| 163
| 0.702644
| 1,718
| 22,959
| 9.153667
| 0.066938
| 0.112934
| 0.023846
| 0.036564
| 0.835241
| 0.796325
| 0.773496
| 0.714931
| 0.651532
| 0.644283
| 0
| 0.004182
| 0.229278
| 22,959
| 429
| 164
| 53.517483
| 0.884544
| 0.047258
| 0
| 0.637097
| 1
| 0
| 0.135374
| 0.106378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069892
| false
| 0.053763
| 0.005376
| 0.032258
| 0.115591
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
cb964f0bf8842ff029cc408701c71a0a3ebe8269
| 118
|
py
|
Python
|
neurodiffeq_conditions/__init__.py
|
odegym/neurodiffeq-conditions
|
6412ef797557d90b3cc9a7c85072bd7e3b237cfc
|
[
"MIT"
] | 2
|
2021-04-23T09:43:02.000Z
|
2021-04-28T14:11:14.000Z
|
neurodiffeq_conditions/__init__.py
|
odegym/neurodiffeq-conditions
|
6412ef797557d90b3cc9a7c85072bd7e3b237cfc
|
[
"MIT"
] | null | null | null |
neurodiffeq_conditions/__init__.py
|
odegym/neurodiffeq-conditions
|
6412ef797557d90b3cc9a7c85072bd7e3b237cfc
|
[
"MIT"
] | null | null | null |
from .conditions import Condition3D, ConditionComponent3D, ConditionComponent, ComposedCondition3D, ComposedCondition
| 59
| 117
| 0.889831
| 8
| 118
| 13.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027273
| 0.067797
| 118
| 1
| 118
| 118
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cbded0c5af9870dd8ed44416547f20ac1ecab246
| 5,053
|
py
|
Python
|
tests/parser/24-Labyrinth.asp.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/24-Labyrinth.asp.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/24-Labyrinth.asp.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# ASP (answer-set programming) parser test fixture: the Labyrinth encoding.
# ``input`` is the program text fed to the parser under test and ``output``
# is the text the parser is expected to emit back.
# NOTE: the name ``input`` shadows the builtin of the same name, but the test
# harness looks these module attributes up by name, so they cannot be renamed.
input = """
dir(e). dir(w). dir(n). dir(s).
inverse(e,w). inverse(w,e).
inverse(n,s). inverse(s,n).
row(X) :- field(X,Y).
col(Y) :- field(X,Y).
num_rows(X) :- row(X), not row(XX), XX = X+1.
num_cols(Y) :- col(Y), not col(YY), YY = Y+1.
goal(X,Y,0) :- goal_on(X,Y).
reach(X,Y,0) :- init_on(X,Y).
conn(X,Y,D,0) :- connect(X,Y,D).
step(S) :- max_steps(S), 0 < S.
step(T) :- step(S), T = S-1, 1 < S.
%% Direct neighbors
dneighbor(n,X,Y,XX,Y) :- field(X,Y), field(XX,Y), XX = X+1.
dneighbor(s,X,Y,XX,Y) :- field(X,Y), field(XX,Y), XX = X-1.
dneighbor(e,X,Y,X,YY) :- field(X,Y), field(X,YY), YY = Y+1.
dneighbor(w,X,Y,X,YY) :- field(X,Y), field(X,YY), YY = Y-1.
%% All neighboring fields
neighbor(D,X,Y,XX,YY) :- dneighbor(D,X,Y,XX,YY).
neighbor(n,X,Y, 1, Y) :- field(X,Y), num_rows(X).
neighbor(s,1,Y, X, Y) :- field(X,Y), num_rows(X).
neighbor(e,X,Y, X, 1) :- field(X,Y), num_cols(Y).
neighbor(w,X,1, X, Y) :- field(X,Y), num_cols(Y).
%% Select a row or column to push
neg_goal(T) :- goal(X,Y,T), not reach(X,Y,T).
rrpush(T) :- step(T), neg_goal(S), S = T-1, not ccpush(T).
ccpush(T) :- step(T), neg_goal(S), S = T-1, not rrpush(T).
orpush(X,T) :- row(X), row(XX), rpush(XX,T), X != XX.
ocpush(Y,T) :- col(Y), col(YY), cpush(YY,T), Y != YY.
rpush(X,T) :- row(X), rrpush(T), not orpush(X,T).
cpush(Y,T) :- col(Y), ccpush(T), not ocpush(Y,T).
push(X,e,T) :- rpush(X,T), not push(X,w,T).
push(X,w,T) :- rpush(X,T), not push(X,e,T).
push(Y,n,T) :- cpush(Y,T), not push(Y,s,T).
push(Y,s,T) :- cpush(Y,T), not push(Y,n,T).
%% Determine new position of a (pushed) field
shift(XX,YY,X,Y,T) :- neighbor(e,XX,YY,X,Y), push(XX,e,T), step(T).
shift(XX,YY,X,Y,T) :- neighbor(w,XX,YY,X,Y), push(XX,w,T), step(T).
shift(XX,YY,X,Y,T) :- neighbor(n,XX,YY,X,Y), push(YY,n,T), step(T).
shift(XX,YY,X,Y,T) :- neighbor(s,XX,YY,X,Y), push(YY,s,T), step(T).
shift( X, Y,X,Y,T) :- field(X,Y), not push(X,e,T), not push(X,w,T), not push(Y,n,T), not push(Y,s,T), step(T).
%% Move connections around
conn(X,Y,D,T) :- conn(XX,YY,D,S), S = T-1, dir(D), shift(XX,YY,X,Y,T), step(T).
%% Location of goal after pushing
goal(X,Y,T) :- goal(XX,YY,S), S = T-1, shift(XX,YY,X,Y,T), step(T).
%% Locations reachable from new position
reach(X,Y,T) :- reach(XX,YY,S), S = T-1, shift(XX,YY,X,Y,T), step(T).
reach(X,Y,T) :- reach(XX,YY,T), dneighbor(D,XX,YY,X,Y), conn(XX,YY,D,T), conn(X,Y,E,T), inverse(D,E), step(T).
%% Goal must be reached
:- neg_goal(S), max_steps(S).
%% Project output
% #hide.
% #show push(Z,D,T).
"""
# The parser is expected to reproduce the program verbatim: the original file
# repeated the entire literal a second time, byte-for-byte identical.  Alias
# it instead of duplicating ~55 lines, so the two can never drift apart.
output = input
| 31
| 110
| 0.570552
| 1,178
| 5,053
| 2.423599
| 0.062818
| 0.06725
| 0.02732
| 0.050438
| 0.996147
| 0.996147
| 0.996147
| 0.996147
| 0.996147
| 0.996147
| 0
| 0.009662
| 0.139719
| 5,053
| 162
| 111
| 31.191358
| 0.647113
| 0
| 0
| 0.981818
| 0
| 0.527273
| 0.993865
| 0.094597
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
381f65e5fb352d637eb7e8f107847e992d651332
| 2,500
|
py
|
Python
|
elfsample4.py
|
TheMindVirus/pico-uf22elf
|
ee5d95208851e6eba4b21675cae66fdf07176d0e
|
[
"MIT"
] | null | null | null |
elfsample4.py
|
TheMindVirus/pico-uf22elf
|
ee5d95208851e6eba4b21675cae66fdf07176d0e
|
[
"MIT"
] | null | null | null |
elfsample4.py
|
TheMindVirus/pico-uf22elf
|
ee5d95208851e6eba4b21675cae66fdf07176d0e
|
[
"MIT"
] | null | null | null |
# Hand-assembles a toy 64-bit ELF image byte by byte and writes it to
# "sample4.elf".  Layout: 64-byte ELF header, one 56-byte program header,
# two 64-byte section headers, a section-name string table, then a payload.
# NOTE(review): several fields (e.g. the "\x20\x00" ABI pair and the 0xF8
# string-table offset) look inconsistent with a strict ELF64 layout --
# presumably deliberate for this experiment; verify against the ELF spec
# before reusing this as a reference encoder.
data = b""
data += b"\x7F\x45\x4C\x46" # ELF
data += b"\x02\x02\x02" # ELF64
data += b"\x20\x00" # ABI
data += b"\x00\x00\x00\x00\x00\x00\x00" # Pad
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Type Machine Version
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Start Address
data += b"\x00\x00\x00\x00\x00\x00\x00\x40" # Program Header
data += b"\x00\x00\x00\x00\x00\x00\x00\x78" # Section Header
data += b"\x00\x00\x00\x00" # Flags
data += b"\x00\x40" # Size of This Header
data += b"\x00\x38" # Size of Program Header
data += b"\x00\x01" # Number of Program Headers
data += b"\x00\x40" # Size of Section Headers
data += b"\x00\x02" # Number of Section Headers
data += b"\x00\x01" # Section Index Containing Labels
data += b"\x00\x00\x00\x01" # Segment Type (LOAD)
data += b"\x00\x00\x00\x00" # Flags (None)
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Offset
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Virtual Address
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Physical Address
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Size in File
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Size in Memory
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Alignment
data += b"\x00\x00\x00\x01" # Section Name in STRTAB
data += b"\x00\x00\x00\x01" # PROGBITS
data += b"\x00\x00\x00\x00\x00\x00\x00\x07" # Flags (WAX)
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Virtual Address
data += b"\x00\x00\x00\x00\x00\x00\x01\x00" # Offset in File
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Size in File
data += b"\x00\x00\x00\x00" # Section Index
data += b"\x00\x00\x00\x00" # Extra Information
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Alignment
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Size of Entry or Zero
data += b"\x00\x00\x00\x07" # Section Name in STRTAB
data += b"\x00\x00\x00\x03" # STRTAB
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Flags (None)
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Virtual Address
data += b"\x00\x00\x00\x00\x00\x00\x00\xF8" # Offset in File
data += b"\x00\x00\x00\x00\x00\x00\x00\x0C" # Size in File
data += b"\x00\x00\x00\x00" # Section Index
data += b"\x00\x00\x00\x00" # Extra Information
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Alignment
data += b"\x00\x00\x00\x00\x00\x00\x00\x00" # Size of Entry or Zero
data += b"\x00.text\x00.data\x00"
# Pad to 0x100 before the payload.  NOTE(review): the string table above
# appears to have already pushed len(data) past 0x100, which would make this
# a no-op (negative repeat count yields b"") -- confirm the intended layout.
data += b"\x00" * (0x100 - len(data))
data += b"\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x21\x0A\x00" # Hello World!
# Write the image out.  A context manager guarantees the handle is closed
# even if the write raises, and the name avoids shadowing ``file``.
with open("sample4.elf", "wb") as elf_file:
    elf_file.write(data)
print("Done!")
| 41.666667
| 82
| 0.662
| 484
| 2,500
| 3.419421
| 0.161157
| 0.656193
| 0.799396
| 0.819335
| 0.756495
| 0.735952
| 0.670091
| 0.646526
| 0.646526
| 0.559517
| 0
| 0.241663
| 0.1244
| 2,500
| 59
| 83
| 42.372881
| 0.51439
| 0.2664
| 0
| 0.568627
| 0
| 0.019608
| 0.610397
| 0.452767
| 0
| 0
| 0.002795
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.019608
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69938fdbc791a204a97bae45a48377d8c1b07e99
| 9,558
|
py
|
Python
|
testing_suite/test_monoexonic.py
|
vinay-swamy/TALON
|
ce6f403035a9697334518c39bfe56a4550884699
|
[
"MIT"
] | null | null | null |
testing_suite/test_monoexonic.py
|
vinay-swamy/TALON
|
ce6f403035a9697334518c39bfe56a4550884699
|
[
"MIT"
] | null | null | null |
testing_suite/test_monoexonic.py
|
vinay-swamy/TALON
|
ce6f403035a9697334518c39bfe56a4550884699
|
[
"MIT"
] | null | null | null |
import pytest
from talon import talon, init_refs
from .helper_fns import fetch_correct_ID, get_db_cursor
@pytest.mark.integration
class TestIdentifyMonoexonic(object):
    """Integration tests for talon.identify_monoexon_transcript.

    Each test rebuilds the lookup structures from the toy annotation database
    ("scratch/toy.db", build "toy_build") and classifies one single-exon read
    against gene TG6 on chr4.  The setup block is repeated per test because
    the temp tables and counters are stateful between runs.
    """
    def test_match(self):
        """ Example where the transcript is a monoexonic match.
        """
        conn, cursor = get_db_cursor()
        build = "toy_build"
        database = "scratch/toy.db"
        talon.get_counters(database)
        # Temp tables and reference dicts that identify_monoexon_transcript expects.
        init_refs.make_temp_novel_gene_table(cursor, build)
        init_refs.make_temp_monoexonic_transcript_table(cursor, build)
        edge_dict = init_refs.make_edge_dict(cursor)
        location_dict = init_refs.make_location_dict(build, cursor)
        run_info = talon.init_run_info(database, build)
        transcript_dict = init_refs.make_transcript_dict(cursor, build)
        vertex_2_gene = init_refs.make_vertex_2_gene_dict(cursor)
        gene_starts, gene_ends = init_refs.make_gene_start_and_end_dict(cursor, build)
        # Coordinates descend because the read is on the '-' strand
        # (presumably given 5' -> 3'; confirm against identify_monoexon_transcript).
        chrom = "chr4"
        strand = "-"
        positions = ( 3900, 1100 )
        annotation = talon.identify_monoexon_transcript(chrom, positions,
                                                        strand, cursor,
                                                        location_dict, edge_dict,
                                                        transcript_dict, vertex_2_gene,
                                                        gene_starts, gene_ends, run_info,
                                                        'temp_gene', 'temp_monoexon')
        correct_gene_ID = fetch_correct_ID("TG6", "gene", cursor)
        # (transcript ID is fetched but not asserted in this test)
        correct_transcript_ID = fetch_correct_ID("TG6-001", "transcript", cursor)
        assert annotation['gene_ID'] == correct_gene_ID
        # Read ends differ from the annotated ends by 100 bp on each side.
        assert annotation['start_delta'] == 100
        assert annotation['end_delta'] == -100
        conn.close()
    def test_partial_match(self):
        """ Example where the transcript overlaps a single-exon transcript,
            but is shorter. In the past, the start would be assigned to the
            annotated start, and the end would be novel. This is no longer
            the case- at this time, the transcript will be assigned to
            the annotated match. """
        conn, cursor = get_db_cursor()
        build = "toy_build"
        database = "scratch/toy.db"
        talon.get_counters(database)
        # Same stateful setup as test_match (see class docstring).
        init_refs.make_temp_novel_gene_table(cursor, build)
        init_refs.make_temp_monoexonic_transcript_table(cursor, build)
        edge_dict = init_refs.make_edge_dict(cursor)
        location_dict = init_refs.make_location_dict(build, cursor)
        run_info = talon.init_run_info(database, build)
        transcript_dict = init_refs.make_transcript_dict(cursor, build)
        vertex_2_gene = init_refs.make_vertex_2_gene_dict(cursor)
        gene_starts, gene_ends = init_refs.make_gene_start_and_end_dict(cursor, build)
        # Shorter '-' strand read: 3' end stops well inside the annotated transcript.
        chrom = "chr4"
        strand = "-"
        positions = ( 3900, 2900 )
        annotation = talon.identify_monoexon_transcript(chrom, positions,
                                                        strand, cursor,
                                                        location_dict, edge_dict,
                                                        transcript_dict, vertex_2_gene,
                                                        gene_starts, gene_ends, run_info,
                                                        'temp_gene', 'temp_monoexon')
        correct_gene_ID = fetch_correct_ID("TG6", "gene", cursor)
        correct_transcript_ID = fetch_correct_ID("TG6-001", "transcript", cursor)
        assert annotation['gene_ID'] == correct_gene_ID
        # Despite the 1900 bp end difference, the read is still assigned to
        # the annotated transcript (see docstring).
        assert annotation['transcript_ID'] == correct_transcript_ID
        assert annotation['start_delta'] == 100
        assert annotation['end_delta'] == -1900
        conn.close()
    # Commenting out these tests for now because they are redundant. But saving in
    # case they might be useful down the line.
    # def test_partial_match_3prime(self):
    #     """ Example where the transcript is short, so it overlaps the
    #         annotated transcript but is not an accepted match.
    #         the end should get assigned to the annotated end, but the end is
    #         novel """
    #
    #     conn, cursor = get_db_cursor()
    #     build = "toy_build"
    #     database = "scratch/toy.db"
    #     talon.get_counters(database)
    #     init_refs.make_temp_novel_gene_table(cursor, build)
    #     init_refs.make_temp_monoexonic_transcript_table(cursor, build)
    #     edge_dict = init_refs.make_edge_dict(cursor)
    #     location_dict = init_refs.make_location_dict(build, cursor)
    #     run_info = talon.init_run_info(database, build)
    #     transcript_dict = init_refs.make_transcript_dict(cursor, build)
    #     vertex_2_gene = init_refs.make_vertex_2_gene_dict(cursor)
    #     gene_starts, gene_ends = init_refs.make_gene_start_and_end_dict(cursor, build)
    #
    #     chrom = "chr4"
    #     strand = "-"
    #     positions = ( 2000, 1100 )
    #
    #     annotation = talon.identify_monoexon_transcript(chrom, positions,
    #                                                     strand, cursor,
    #                                                     location_dict, edge_dict,
    #                                                     transcript_dict, vertex_2_gene,
    #                                                     gene_starts, gene_ends, run_info,
    #                                                     'temp_gene', 'temp_monoexon')
    #
    #     correct_gene_ID = fetch_correct_ID("TG6", "gene", cursor)
    #     assert annotation['gene_ID'] == correct_gene_ID
    #     assert annotation['start_delta'] == None
    #     assert annotation['end_delta'] == -100
    #
    #     conn.close()
    #
    # def test_overlap_but_no_vertex_match(self):
    #     """ Example where the transcript is short, so it overlaps the
    #         annotated transcript but is not an accepted match.
    #         the start should get assigned to the annotated end, but the end is
    #         novel """
    #
    #     conn, cursor = get_db_cursor()
    #     build = "toy_build"
    #     database = "scratch/toy.db"
    #     talon.get_counters(database)
    #     init_refs.make_temp_novel_gene_table(cursor, build)
    #     init_refs.make_temp_monoexonic_transcript_table(cursor, build)
    #     edge_dict = init_refs.make_edge_dict(cursor)
    #     location_dict = init_refs.make_location_dict(build, cursor)
    #     run_info = talon.init_run_info(database, build)
    #     transcript_dict = init_refs.make_transcript_dict(cursor, build)
    #     vertex_2_gene = init_refs.make_vertex_2_gene_dict(cursor)
    #     gene_starts, gene_ends = init_refs.make_gene_start_and_end_dict(cursor, build)
    #     tot_vertices = len(vertex_2_gene)
    #     query = """ SELECT COUNT(*) FROM temp_monoexon """
    #     tot_monoexonic = cursor.execute(query).fetchone()[0]
    #
    #     chrom = "chr4"
    #     strand = "-"
    #     positions = ( 2500, 2000 )
    #
    #     annotation = talon.identify_monoexon_transcript(chrom, positions,
    #                                                     strand, cursor,
    #                                                     location_dict, edge_dict,
    #                                                     transcript_dict, vertex_2_gene,
    #                                                     gene_starts, gene_ends, run_info,
    #                                                     'temp_gene', 'temp_monoexon')
    #
    #     correct_gene_ID = fetch_correct_ID("TG6", "gene", cursor)
    #     print(annotation['start_vertex'])
    #     print(annotation['end_vertex'])
    #     assert annotation['gene_ID'] == correct_gene_ID
    #     assert annotation['start_delta'] == None
    #     assert annotation['end_delta'] == None
    #
    #     # Now check if the transcript got added to the right data structures
    #     assert len(vertex_2_gene) == tot_vertices + 2
    #     assert cursor.execute(query).fetchone()[0] == tot_monoexonic + 1
    #
    #     conn.close()
    #
    def test_antisense(self):
        """ Example where the transcript is antisense """
        conn, cursor = get_db_cursor()
        build = "toy_build"
        database = "scratch/toy.db"
        talon.get_counters(database)
        # Same stateful setup as test_match (see class docstring).
        init_refs.make_temp_novel_gene_table(cursor, build)
        init_refs.make_temp_monoexonic_transcript_table(cursor, build)
        edge_dict = init_refs.make_edge_dict(cursor)
        location_dict = init_refs.make_location_dict(build, cursor)
        run_info = talon.init_run_info(database, build)
        transcript_dict = init_refs.make_transcript_dict(cursor, build)
        vertex_2_gene = init_refs.make_vertex_2_gene_dict(cursor)
        gene_starts, gene_ends = init_refs.make_gene_start_and_end_dict(cursor, build)
        # '+' strand read over the '-' strand gene TG6 -> antisense.
        chrom = "chr4"
        strand = "+"
        positions = ( 1300, 3900 )
        annotation = talon.identify_monoexon_transcript(chrom, positions,
                                                        strand, cursor,
                                                        location_dict, edge_dict,
                                                        transcript_dict, vertex_2_gene,
                                                        gene_starts, gene_ends, run_info,
                                                        'temp_gene', 'temp_monoexon')
        anti_gene_ID = fetch_correct_ID("TG6", "gene", cursor)
        # Novelty rows appear to be tuples with the attribute name at [-2]
        # and its value at [-1] -- confirm against the talon novelty schema.
        gene_novelty_types = [ x[-2] for x in annotation['gene_novelty']]
        t_novelty_types = [ x[-2] for x in annotation['transcript_novelty']]
        assert annotation['gene_novelty'][0][-1] == "TRUE"
        assert "antisense_gene" in gene_novelty_types
        assert "antisense_transcript" in t_novelty_types
        conn.close()
| 46.852941
| 87
| 0.598975
| 1,082
| 9,558
| 4.946396
| 0.132163
| 0.053812
| 0.078475
| 0.044843
| 0.821188
| 0.80213
| 0.789985
| 0.782324
| 0.764948
| 0.757848
| 0
| 0.015277
| 0.315129
| 9,558
| 203
| 88
| 47.083744
| 0.802322
| 0.450303
| 0
| 0.769231
| 0
| 0
| 0.068993
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 1
| 0.032967
| false
| 0
| 0.032967
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69bcdda6664f64ef684325c7176bc9fc2427ba09
| 205
|
py
|
Python
|
simpletransformers/classification/__init__.py
|
kinoute/simpletransformers
|
c14d01c8011cbdbf51996f07fc2fbe3d3c433f46
|
[
"Apache-2.0"
] | null | null | null |
simpletransformers/classification/__init__.py
|
kinoute/simpletransformers
|
c14d01c8011cbdbf51996f07fc2fbe3d3c433f46
|
[
"Apache-2.0"
] | null | null | null |
simpletransformers/classification/__init__.py
|
kinoute/simpletransformers
|
c14d01c8011cbdbf51996f07fc2fbe3d3c433f46
|
[
"Apache-2.0"
] | null | null | null |
from simpletransformers.classification.classification_model import ClassificationModel
from simpletransformers.classification.multi_label_classification_model import (
MultiLabelClassificationModel,
)
| 41
| 86
| 0.897561
| 16
| 205
| 11.25
| 0.5625
| 0.244444
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068293
| 205
| 4
| 87
| 51.25
| 0.942408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
69f1109d9cd09dbd8a19e745f7e9cf074cf65bbe
| 258
|
py
|
Python
|
openprocurement/relocation/contracts/validation.py
|
mepps-md/openprocurement.relocation.contracts
|
e14b16b03172aacf828f7f4cd7db792720f9cc73
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/relocation/contracts/validation.py
|
mepps-md/openprocurement.relocation.contracts
|
e14b16b03172aacf828f7f4cd7db792720f9cc73
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/relocation/contracts/validation.py
|
mepps-md/openprocurement.relocation.contracts
|
e14b16b03172aacf828f7f4cd7db792720f9cc73
|
[
"Apache-2.0"
] | 3
|
2017-03-23T22:17:50.000Z
|
2018-02-19T15:35:19.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.relocation.core.validation import validate_accreditation_level
def validate_contract_accreditation_level(request):
    """Check that the request owner is accredited to create a contract.

    Thin wrapper: pulls the validated contract off the request and delegates
    to the core accreditation-level validator with the 'create_accreditation'
    check name.
    """
    contract = request.validated['contract']
    validate_accreditation_level(request, contract, 'create_accreditation')
| 43
| 96
| 0.825581
| 27
| 258
| 7.592593
| 0.62963
| 0.263415
| 0.253659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004184
| 0.073643
| 258
| 5
| 97
| 51.6
| 0.853556
| 0.081395
| 0
| 0
| 0
| 0
| 0.119149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
386d1cad798ccd31fcf2d315c5ff7c222b1ac722
| 207
|
py
|
Python
|
botaclan/google/auth.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
botaclan/google/auth.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
botaclan/google/auth.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
from google.oauth2 import service_account
def generate_credentials(credentials_path: str) -> service_account.Credentials:
    """Load Google service-account credentials from a JSON key file.

    :param credentials_path: filesystem path to the service-account key file
    :return: a ``service_account.Credentials`` instance built from that file
    """
    credentials = service_account.Credentials.from_service_account_file(credentials_path)
    return credentials
| 34.5
| 82
| 0.859903
| 25
| 207
| 6.76
| 0.52
| 0.331361
| 0.295858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005263
| 0.082126
| 207
| 5
| 83
| 41.4
| 0.884211
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
38bfb2c2a0bd49a67dc70ef99faae6ceffddffe0
| 10,339
|
py
|
Python
|
tests/integrationv2/test_client_authentication.py
|
glaubitz/s2n
|
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
|
[
"Apache-2.0"
] | 4
|
2020-10-01T21:55:38.000Z
|
2021-02-05T09:47:03.000Z
|
tests/integrationv2/test_client_authentication.py
|
glaubitz/s2n
|
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
|
[
"Apache-2.0"
] | null | null | null |
tests/integrationv2/test_client_authentication.py
|
glaubitz/s2n
|
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
|
[
"Apache-2.0"
] | 3
|
2020-06-24T18:36:11.000Z
|
2021-12-09T18:20:37.000Z
|
import copy
import os
import pytest
import time
from configuration import (available_ports, ALL_TEST_CIPHERS, ALL_TEST_CURVES,
ALL_TEST_CERTS, PROTOCOLS)
from common import Certificates, ProviderOptions, Protocols, data_bytes
from fixtures import managed_process
from providers import Provider, S2N, OpenSSL
from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", [cipher for cipher in ALL_TEST_CIPHERS if 'ECDSA' not in cipher.name], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL])
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
def test_client_auth_with_s2n_server(managed_process, cipher, provider, curve, protocol, certificate):
    """Mutual-TLS handshake: an OpenSSL client presenting a client cert
    connects to an s2n server, sends 64 random bytes, and both processes must
    exit cleanly with the expected negotiated protocol version echoed by the
    server.  ECDSA ciphers are filtered out above (the server cert is RSA).
    """
    port = next(available_ports)
    random_bytes = data_bytes(64)
    # Client side: presents its own key/cert pair (use_client_auth=True) and
    # validates the server strictly (insecure=False).
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        curve=curve,
        data_to_send=random_bytes,
        use_client_auth=True,
        client_key_file=certificate.key,
        client_certificate_file=certificate.cert,
        insecure=False,
        protocol=protocol)
    # Server side mirrors the client options, but sends no data and serves
    # the wildcard RSA certificate.
    server_options = copy.copy(client_options)
    server_options.data_to_send = None
    server_options.mode = Provider.ServerMode
    server_options.key = Certificates.RSA_2048_SHA256_WILDCARD.key
    server_options.cert = Certificates.RSA_2048_SHA256_WILDCARD.cert
    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(S2N, server_options, timeout=5)
    client = managed_process(provider, client_options, timeout=5)
    # The client should connect and return without error
    for results in client.get_results():
        assert results.exception is None
        assert results.exit_code == 0
    expected_version = get_expected_s2n_version(protocol, provider)
    # S2N should indicate the protocol version in a successful connection,
    # and must have received the random payload the client sent.
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert bytes("Actual protocol version: {}".format(expected_version).encode('utf-8')) in results.stdout
        assert random_bytes in results.stdout
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL])
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
def test_client_auth_with_s2n_server_using_nonmatching_certs(managed_process, cipher, provider, curve, protocol, certificate):
    """Mutual auth must fail when the s2n server expects a different client cert."""
    port = next(available_ports)

    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        curve=curve,
        data_to_send=b'',
        use_client_auth=True,
        client_key_file=certificate.key,
        client_certificate_file=certificate.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.data_to_send = None
    server_options.mode = Provider.ServerMode
    server_options.key = Certificates.RSA_2048_SHA256_WILDCARD.key
    server_options.cert = Certificates.RSA_2048_SHA256_WILDCARD.cert
    # Tell the server to expect the wrong certificate
    server_options.client_certificate_file = Certificates.RSA_2048_SHA256_WILDCARD.cert

    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(S2N, server_options, timeout=5)
    # Fix: use the parametrized provider (was hard-coded to OpenSSL, which
    # silently ignored the 'provider' parameter; siblings use 'provider').
    client = managed_process(provider, client_options, timeout=5)

    # The client should return nonzero because the connection failed
    for results in client.get_results():
        assert results.exception is None
        if protocol == Protocols.TLS13:
            # Exit code 104 is connection reset by peer.
            # This is almost always 104, but we have hit an occasion where s_client
            # closed cleanly.
            assert results.exit_code in (0, 104)
        else:
            assert results.exit_code == 1

    # S2N should tell us that mutual authentication failed
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 255
        assert b'Error: Mutual Auth was required, but not negotiated' in results.stderr
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
def test_client_auth_with_s2n_client_no_cert(managed_process, cipher, curve, protocol, provider, certificate):
    """An s2n client that trusts the server but sends no client cert still connects.

    The server's handshake trace must show it read a client certificate
    message (empty) and finished the handshake.
    """
    port = next(available_ports)
    random_bytes = data_bytes(64)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        curve=curve,
        data_to_send=random_bytes,
        use_client_auth=True,
        client_trust_store=Certificates.RSA_2048_SHA256_WILDCARD.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.data_to_send = None
    server_options.mode = Provider.ServerMode
    server_options.key = Certificates.RSA_2048_SHA256_WILDCARD.key
    server_options.cert = Certificates.RSA_2048_SHA256_WILDCARD.cert

    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)

    # The client should connect and return without error
    for results in client.get_results():
        assert results.exception is None
        assert results.exit_code == 0

    # Openssl should indicate the protocol version in a successful connection.
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert random_bytes in results.stdout
        # str.encode() already yields bytes; the extra bytes() wrappers were no-ops.
        if protocol is Protocols.TLS13:
            message = "SSL_accept:SSLv3/TLS read client certificate\nSSL_accept:SSLv3/TLS read finished".encode('utf-8')
        else:
            if 'openssl-1.0.2' in OpenSSL.get_version():
                message = 'SSL_accept:SSLv3 read client certificate A\nSSL_accept:SSLv3 read client key exchange A\nSSL_accept:SSLv3 read certificate verify A\nSSL_accept:SSLv3 read finished A'.encode('utf-8')
            else:
                message = "SSL_accept:SSLv3/TLS read client certificate\nSSL_accept:SSLv3/TLS read client key exchange\nSSL_accept:SSLv3/TLS read change cipher spec\nSSL_accept:SSLv3/TLS read finished".encode('utf-8')
        assert message in results.stderr
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
def test_client_auth_with_s2n_client_with_cert(managed_process, cipher, curve, protocol, provider, certificate):
    """An s2n client presenting a certificate completes mutual auth with the server.

    The server's handshake trace must show it read and verified the
    client certificate before finishing the handshake.
    """
    port = next(available_ports)
    random_bytes = data_bytes(64)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        curve=curve,
        data_to_send=random_bytes,
        use_client_auth=True,
        client_key_file=certificate.key,
        client_certificate_file=certificate.cert,
        client_trust_store=Certificates.RSA_2048_SHA256_WILDCARD.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.data_to_send = None
    server_options.mode = Provider.ServerMode
    server_options.key = Certificates.RSA_2048_SHA256_WILDCARD.key
    server_options.cert = Certificates.RSA_2048_SHA256_WILDCARD.cert

    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)

    # The client should connect and return without error
    for results in client.get_results():
        assert results.exception is None
        assert results.exit_code == 0

    # Openssl should indicate the protocol version in a successful connection.
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert random_bytes in results.stdout
        # str.encode() already yields bytes; the extra bytes() wrappers were no-ops.
        if protocol is Protocols.TLS13:
            message = "SSL_accept:SSLv3/TLS read client certificate\nSSL_accept:SSLv3/TLS read certificate verify\nSSL_accept:SSLv3/TLS read finished".encode('utf-8')
        else:
            message = 'SSL_accept:SSLv3/TLS read client certificate\nSSL_accept:SSLv3/TLS read client key exchange\nSSL_accept:SSLv3/TLS read certificate verify\nSSL_accept:SSLv3/TLS read change cipher spec\nSSL_accept:SSLv3/TLS read finished'.encode('utf-8')
        # NOTE(review): unlike the no-cert test, this openssl-1.0.2 override is
        # outside the else branch, so it also replaces the TLS1.3 message.
        # Presumably safe because 1.0.2 cannot negotiate TLS1.3 — confirm.
        if 'openssl-1.0.2' in OpenSSL.get_version():
            message = 'SSL_accept:SSLv3 read client certificate A\nSSL_accept:SSLv3 read client key exchange A\nSSL_accept:SSLv3 read certificate verify A\nSSL_accept:SSLv3 read finished A'.encode('utf-8')
        assert message in results.stderr
| 45.747788
| 266
| 0.742818
| 1,364
| 10,339
| 5.408358
| 0.123167
| 0.044056
| 0.056934
| 0.04636
| 0.866748
| 0.866748
| 0.86241
| 0.86241
| 0.856852
| 0.856852
| 0
| 0.019536
| 0.17816
| 10,339
| 225
| 267
| 45.951111
| 0.848652
| 0.107747
| 0
| 0.823529
| 0
| 0.029412
| 0.136897
| 0.032595
| 0
| 0
| 0
| 0
| 0.141176
| 1
| 0.023529
| false
| 0
| 0.052941
| 0
| 0.076471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
38df9ae5378e24d418e5d933f10235d5dc04b19e
| 612
|
py
|
Python
|
shop/api/decorators.py
|
PragatiVerma18/DPhi-Task
|
cab83bc522a216bd006617df6cc28b6f479641fe
|
[
"MIT"
] | null | null | null |
shop/api/decorators.py
|
PragatiVerma18/DPhi-Task
|
cab83bc522a216bd006617df6cc28b6f479641fe
|
[
"MIT"
] | null | null | null |
shop/api/decorators.py
|
PragatiVerma18/DPhi-Task
|
cab83bc522a216bd006617df6cc28b6f479641fe
|
[
"MIT"
] | 1
|
2021-07-20T08:53:09.000Z
|
2021-07-20T08:53:09.000Z
|
from django.core.exceptions import PermissionDenied
def user_is_nursery(function):
    """Restrict a view to authenticated users whose role is "Nursery".

    Args:
        function: The view callable to wrap.

    Returns:
        A wrapper that calls ``function`` only for authenticated users
        with ``user.role == "Nursery"``.

    Raises:
        PermissionDenied: when the requester is anonymous or has a
            different role (raised at call time, not decoration time).
    """
    from functools import wraps

    # functools.wraps preserves the wrapped view's __name__/__doc__ so
    # introspection, debugging, and URL tooling see the real view.
    @wraps(function)
    def wrap(request, *args, **kwargs):
        user = request.user
        if user.is_authenticated and user.role == "Nursery":
            return function(request, *args, **kwargs)
        raise PermissionDenied
    return wrap
def user_is_buyer(function):
    """Restrict a view to authenticated users whose role is "Buyer".

    Args:
        function: The view callable to wrap.

    Returns:
        A wrapper that calls ``function`` only for authenticated users
        with ``user.role == "Buyer"``.

    Raises:
        PermissionDenied: when the requester is anonymous or has a
            different role (raised at call time, not decoration time).
    """
    from functools import wraps

    # functools.wraps preserves the wrapped view's __name__/__doc__ so
    # introspection, debugging, and URL tooling see the real view.
    @wraps(function)
    def wrap(request, *args, **kwargs):
        user = request.user
        if user.is_authenticated and user.role == "Buyer":
            return function(request, *args, **kwargs)
        raise PermissionDenied
    return wrap
| 25.5
| 60
| 0.627451
| 68
| 612
| 5.558824
| 0.338235
| 0.063492
| 0.179894
| 0.116402
| 0.767196
| 0.767196
| 0.767196
| 0.767196
| 0.767196
| 0.767196
| 0
| 0
| 0.277778
| 612
| 23
| 61
| 26.608696
| 0.855204
| 0
| 0
| 0.705882
| 0
| 0
| 0.019608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
2a31ae2ff8704ff1c03f2c1d90c0a8148067915f
| 92
|
py
|
Python
|
parameters_8080.py
|
amaurirg/Web2Py
|
235571cd2273a858cbc8f291731672eadf6b8206
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8080.py
|
amaurirg/Web2Py
|
235571cd2273a858cbc8f291731672eadf6b8206
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8080.py
|
amaurirg/Web2Py
|
235571cd2273a858cbc8f291731672eadf6b8206
|
[
"BSD-3-Clause"
] | null | null | null |
# Stored as a salted PBKDF2 hash in the form
# "pbkdf2(iterations,keylen,digest)$salt$hash" — never plaintext.
# NOTE(review): presumably a web2py admin-password parameters file for
# port 8080 (parameters_8080.py) — confirm before reusing or rotating.
password="pbkdf2(1000,20,sha512)$95f88044b35411d6$93381df36be9e1c31359b048fb131c9c532c0dfe"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.527473
| 0.01087
| 92
| 1
| 92
| 92
| 0.373626
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
2a4251bdcf464c53d83ea64656cf474172938f4e
| 75,640
|
py
|
Python
|
generated/ansible-collection/webapplicationfirewallpolicy.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/ansible-collection/webapplicationfirewallpolicy.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/ansible-collection/webapplicationfirewallpolicy.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Python 2/3 compatibility boilerplate conventionally required at the top
# of Ansible modules.
from __future__ import absolute_import, division, print_function
# Force new-style classes under Python 2 (a no-op on Python 3).
__metaclass__ = type
# Module metadata read by Ansible tooling: schema version, maturity
# ('preview'), and the supporting party ('community').
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webapplicationfirewallpolicy
version_added: '2.9'
short_description: Manage Azure WebApplicationFirewallPolicy instance.
description:
- 'Create, update and delete instance of Azure WebApplicationFirewallPolicy.'
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
name:
description:
- Resource name.
type: str
id:
description:
- Resource ID.
type: str
location:
description:
- Resource location.
type: str
policy_settings:
description:
- Describes policySettings for policy.
type: dict
suboptions:
enabled_state:
description:
- Describes if the policy is in enabled state or disabled state.
type: str
mode:
description:
- >-
Describes if it is in detection mode or prevention mode at policy
level.
type: str
custom_rules:
description:
- Describes custom rules inside the policy.
type: list
suboptions:
name:
description:
- >-
Gets name of the resource that is unique within a policy. This name
can be used to access the resource.
type: str
priority:
description:
- >-
Describes priority of the rule. Rules with a lower value will be
evaluated before rules with a higher value.
required: true
type: number
rule_type:
description:
- Describes type of rule.
required: true
type: str
match_conditions:
description:
- List of match conditions.
required: true
type: list
suboptions:
match_variables:
description:
- List of match variables.
required: true
type: list
suboptions:
variable_name:
description:
- Match Variable.
required: true
type: str
selector:
description:
- Describes field of the matchVariable collection.
type: str
operator:
description:
- Describes operator to be matched.
required: true
type: str
negation_conditon:
description:
- Describes if this is negate condition or not.
type: boolean
match_values:
description:
- Match value.
required: true
type: list
transforms:
description:
- List of transforms.
type: list
action:
description:
- Type of Actions.
required: true
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
application_gateways:
description:
- A collection of references to application gateways.
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- Resource name.
type: str
type:
description:
- Resource type.
type: str
location:
description:
- Resource location.
type: str
sku:
description:
- SKU of the application gateway resource.
type: dict
suboptions:
name:
description:
- Name of an application gateway SKU.
type: str
tier:
description:
- Tier of an application gateway.
type: str
capacity:
description:
- Capacity (instance count) of an application gateway.
type: number
ssl_policy:
description:
- SSL policy of the application gateway resource.
type: dict
suboptions:
disabled_ssl_protocols:
description:
- Ssl protocols to be disabled on application gateway.
type: list
policy_type:
description:
- Type of Ssl Policy.
type: str
policy_name:
description:
- Name of Ssl predefined policy.
type: str
cipher_suites:
description:
- >-
Ssl cipher suites to be enabled in the specified order to
application gateway.
type: list
min_protocol_version:
description:
- >-
Minimum version of Ssl protocol to be supported on application
gateway.
type: str
operational_state:
description:
- Operational state of the application gateway resource.
type: str
gateway_ipconfigurations:
description:
- >-
Subnets of the application gateway resource. For default limits, see
[Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the IP configuration that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
authentication_certificates:
description:
- >-
Authentication certificates of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the authentication certificate that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
trusted_root_certificates:
description:
- >-
Trusted Root certificates of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the trusted root certificate that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
ssl_certificates:
description:
- >-
SSL certificates of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the SSL certificate that is unique within an Application
Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
frontend_ipconfigurations:
description:
- >-
Frontend IP addresses of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the frontend IP configuration that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
frontend_ports:
description:
- >-
Frontend ports of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the frontend port that is unique within an Application
Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
probes:
description:
- Probes of the application gateway resource.
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- Name of the probe that is unique within an Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
backend_address_pools:
description:
- >-
Backend address pool of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the backend address pool that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
backend_http_settings_collection:
description:
- >-
Backend http settings of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the backend http settings that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
http_listeners:
description:
- >-
Http listeners of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the HTTP listener that is unique within an Application
Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
url_path_maps:
description:
- >-
URL path map of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the URL path map that is unique within an Application
Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
request_routing_rules:
description:
- Request routing rules of the application gateway resource.
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the request routing rule that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
rewrite_rule_sets:
description:
- Rewrite rules for the application gateway resource.
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the rewrite rule set that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
redirect_configurations:
description:
- >-
Redirect configurations of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
type: list
suboptions:
id:
description:
- Resource ID.
type: str
name:
description:
- >-
Name of the redirect configuration that is unique within an
Application Gateway.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Type of the resource.
type: str
web_application_firewall_configuration:
description:
- Web application firewall configuration.
type: dict
suboptions:
enabled:
description:
- Whether the web application firewall is enabled or not.
required: true
type: boolean
firewall_mode:
description:
- Web application firewall mode.
required: true
type: str
rule_set_type:
description:
- >-
The type of the web application firewall rule set. Possible
values are: 'OWASP'.
required: true
type: str
rule_set_version:
description:
- The version of the rule set type.
required: true
type: str
disabled_rule_groups:
description:
- The disabled rule groups.
type: list
request_body_check:
description:
- Whether allow WAF to check request Body.
type: boolean
max_request_body_size:
description:
- Maximum request body size for WAF.
type: number
max_request_body_size_in_kb:
description:
- Maximum request body size in Kb for WAF.
type: number
file_upload_limit_in_mb:
description:
- Maximum file upload size in Mb for WAF.
type: number
exclusions:
description:
- The exclusion list.
type: list
firewall_policy:
description:
- Reference of the FirewallPolicy resource.
type: dict
suboptions:
id:
description:
- Resource ID.
type: str
enable_http2:
description:
- Whether HTTP2 is enabled on the application gateway resource.
type: boolean
enable_fips:
description:
- Whether FIPS is enabled on the application gateway resource.
type: boolean
autoscale_configuration:
description:
- Autoscale Configuration.
type: dict
suboptions:
min_capacity:
description:
- Lower bound on number of Application Gateway capacity.
required: true
type: number
max_capacity:
description:
- Upper bound on number of Application Gateway capacity.
type: number
resource_guid:
description:
- Resource GUID property of the application gateway resource.
type: str
provisioning_state:
description:
- >-
Provisioning state of the application gateway resource. Possible
values are: 'Updating', 'Deleting', and 'Failed'.
type: str
custom_error_configurations:
description:
- Custom error configurations of the application gateway resource.
type: list
suboptions:
status_code:
description:
- Status code of the application gateway customer error.
type: str
custom_error_page_url:
description:
- Error page URL of the application gateway customer error.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
zones:
description:
- >-
A list of availability zones denoting where the resource needs to
come from.
type: list
identity:
description:
- 'The identity of the application gateway, if configured.'
type: dict
suboptions:
principal_id:
description:
- >-
The principal id of the system assigned identity. This property
will only be provided for a system assigned identity.
type: str
tenant_id:
description:
- >-
The tenant id of the system assigned identity. This property
will only be provided for a system assigned identity.
type: str
type:
description:
- >-
The type of identity used for the resource. The type
'SystemAssigned, UserAssigned' includes both an implicitly
created identity and a set of user assigned identities. The type
'None' will remove any identities from the virtual machine.
type: str
user_assigned_identities:
description:
- >-
The list of user identities associated with resource. The user
identity dictionary key references will be ARM resource ids in
the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
type: >-
unknown[DictionaryType
{"$id":"3152","$type":"DictionaryType","valueType":{"$ref":"3109"},"supportsAdditionalProperties":false,"name":{"$id":"3153","fixed":false},"deprecated":false}]
provisioning_state:
description:
- Provisioning state of the WebApplicationFirewallPolicy.
type: str
resource_state:
description:
- Resource status of the policy.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
type:
description:
- Resource type.
type: str
state:
description:
- Assert the state of the WebApplicationFirewallPolicy.
- >-
Use C(present) to create or update an WebApplicationFirewallPolicy and
C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Creates or updates a WAF policy within a resource group
azure.rm.webapplicationfirewallpolicy:
resource_group: myResourceGroup
name: myApplicationGatewayWebApplicationFirewallPolicy
location: WestUs
custom_rules:
- name: Rule1
priority: '1'
rule_type: MatchRule
match_conditions:
- match_variables:
- variable_name: RemoteAddr
operator: IPMatch
match_values:
- 192.168.1.0/24
- 10.0.0.0/24
action: Block
- name: Rule2
priority: '2'
rule_type: MatchRule
match_conditions:
- match_variables:
- variable_name: RemoteAddr
operator: IPMatch
match_values:
- 192.168.1.0/24
- match_variables:
- variable_name: RequestHeaders
selector: UserAgent
operator: Contains
match_values:
- Windows
action: Block
- name: Deletes a WAF policy within a resource group
azure.rm.webapplicationfirewallpolicy:
resource_group: myResourceGroup
name: myApplicationGatewayWebApplicationFirewallPolicy
state: absent
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
location:
description:
- Resource location.
returned: always
type: str
sample: null
tags:
description:
- Resource tags.
returned: always
type: >-
unknown[DictionaryType
{"$id":"1099","$type":"DictionaryType","valueType":{"$id":"1100","$type":"PrimaryType","knownPrimaryType":"string","name":{"$id":"1101","fixed":false,"raw":"String"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"1102","fixed":false},"deprecated":false}]
sample: null
properties:
description:
- Properties of the web application firewall policy.
returned: always
type: dict
sample: null
contains:
policy_settings:
description:
- Describes policySettings for policy.
returned: always
type: dict
sample: null
contains:
enabled_state:
description:
- Describes if the policy is in enabled state or disabled state.
returned: always
type: str
sample: null
mode:
description:
- >-
Describes if it is in detection mode or prevention mode at policy
level.
returned: always
type: str
sample: null
custom_rules:
description:
- Describes custom rules inside the policy.
returned: always
type: dict
sample: null
contains:
name:
description:
- >-
Gets name of the resource that is unique within a policy. This
name can be used to access the resource.
returned: always
type: str
sample: null
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
priority:
description:
- >-
Describes priority of the rule. Rules with a lower value will be
evaluated before rules with a higher value.
returned: always
type: number
sample: null
rule_type:
description:
- Describes type of rule.
returned: always
type: str
sample: null
match_conditions:
description:
- List of match conditions.
returned: always
type: dict
sample: null
contains:
match_variables:
description:
- List of match variables.
returned: always
type: dict
sample: null
contains:
variable_name:
description:
- Match Variable.
returned: always
type: str
sample: null
selector:
description:
- Describes field of the matchVariable collection.
returned: always
type: str
sample: null
operator:
description:
- Describes operator to be matched.
returned: always
type: str
sample: null
negation_conditon:
description:
- Describes if this is negate condition or not.
returned: always
type: boolean
sample: null
match_values:
description:
- Match value.
returned: always
type: str
sample: null
transforms:
description:
- List of transforms.
returned: always
type: str
sample: null
action:
description:
- Type of Actions.
returned: always
type: str
sample: null
application_gateways:
description:
- A collection of references to application gateways.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
location:
description:
- Resource location.
returned: always
type: str
sample: null
tags:
description:
- Resource tags.
returned: always
type: >-
unknown[DictionaryType
{"$id":"1099","$type":"DictionaryType","valueType":{"$id":"1100","$type":"PrimaryType","knownPrimaryType":"string","name":{"$id":"1101","fixed":false,"raw":"String"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"1102","fixed":false},"deprecated":false}]
sample: null
properties:
description:
- Properties of the application gateway.
returned: always
type: dict
sample: null
sku:
description:
- SKU of the application gateway resource.
returned: always
type: dict
sample: null
contains:
name:
description:
- Name of an application gateway SKU.
returned: always
type: str
sample: null
tier:
description:
- Tier of an application gateway.
returned: always
type: str
sample: null
capacity:
description:
- Capacity (instance count) of an application gateway.
returned: always
type: number
sample: null
ssl_policy:
description:
- SSL policy of the application gateway resource.
returned: always
type: dict
sample: null
contains:
disabled_ssl_protocols:
description:
- Ssl protocols to be disabled on application gateway.
returned: always
type: str
sample: null
policy_type:
description:
- Type of Ssl Policy.
returned: always
type: str
sample: null
policy_name:
description:
- Name of Ssl predefined policy.
returned: always
type: str
sample: null
cipher_suites:
description:
- >-
Ssl cipher suites to be enabled in the specified order to
application gateway.
returned: always
type: str
sample: null
min_protocol_version:
description:
- >-
Minimum version of Ssl protocol to be supported on application
gateway.
returned: always
type: str
sample: null
operational_state:
description:
- Operational state of the application gateway resource.
returned: always
type: str
sample: null
gateway_ipconfigurations:
description:
- >-
Subnets of the application gateway resource. For default limits,
see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway IP configuration.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the IP configuration that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
authentication_certificates:
description:
- >-
Authentication certificates of the application gateway resource.
For default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway authentication
certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the authentication certificate that is unique within
an Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
trusted_root_certificates:
description:
- >-
Trusted Root certificates of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway trusted root
certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the trusted root certificate that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
ssl_certificates:
description:
- >-
SSL certificates of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway SSL certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the SSL certificate that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
frontend_ipconfigurations:
description:
- >-
Frontend IP addresses of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway frontend IP
configuration.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the frontend IP configuration that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
frontend_ports:
description:
- >-
Frontend ports of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway frontend port.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the frontend port that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
probes:
description:
- Probes of the application gateway resource.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway probe.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the probe that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
backend_address_pools:
description:
- >-
Backend address pool of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway backend address pool.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the backend address pool that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
backend_http_settings_collection:
description:
- >-
Backend http settings of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway backend HTTP settings.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the backend http settings that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
http_listeners:
description:
- >-
Http listeners of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway HTTP listener.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the HTTP listener that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
url_path_maps:
description:
- >-
URL path map of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway URL path map.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the URL path map that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
request_routing_rules:
description:
- Request routing rules of the application gateway resource.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway request routing rule.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the request routing rule that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
rewrite_rule_sets:
description:
- Rewrite rules for the application gateway resource.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway rewrite rule set.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the rewrite rule set that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
redirect_configurations:
description:
- >-
Redirect configurations of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway redirect configuration.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the redirect configuration that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
web_application_firewall_configuration:
description:
- Web application firewall configuration.
returned: always
type: dict
sample: null
contains:
enabled:
description:
- Whether the web application firewall is enabled or not.
returned: always
type: boolean
sample: null
firewall_mode:
description:
- Web application firewall mode.
returned: always
type: str
sample: null
rule_set_type:
description:
- >-
The type of the web application firewall rule set. Possible
values are: 'OWASP'.
returned: always
type: str
sample: null
rule_set_version:
description:
- The version of the rule set type.
returned: always
type: str
sample: null
disabled_rule_groups:
description:
- The disabled rule groups.
returned: always
type: dict
sample: null
request_body_check:
description:
- Whether allow WAF to check request Body.
returned: always
type: boolean
sample: null
max_request_body_size:
description:
- Maximum request body size for WAF.
returned: always
type: number
sample: null
max_request_body_size_in_kb:
description:
- Maximum request body size in Kb for WAF.
returned: always
type: number
sample: null
file_upload_limit_in_mb:
description:
- Maximum file upload size in Mb for WAF.
returned: always
type: number
sample: null
exclusions:
description:
- The exclusion list.
returned: always
type: dict
sample: null
firewall_policy:
description:
- Reference of the FirewallPolicy resource.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
enable_http2:
description:
- Whether HTTP2 is enabled on the application gateway resource.
returned: always
type: boolean
sample: null
enable_fips:
description:
- Whether FIPS is enabled on the application gateway resource.
returned: always
type: boolean
sample: null
autoscale_configuration:
description:
- Autoscale Configuration.
returned: always
type: dict
sample: null
contains:
min_capacity:
description:
- Lower bound on number of Application Gateway capacity.
returned: always
type: number
sample: null
max_capacity:
description:
- Upper bound on number of Application Gateway capacity.
returned: always
type: number
sample: null
resource_guid:
description:
- Resource GUID property of the application gateway resource.
returned: always
type: str
sample: null
provisioning_state:
description:
- >-
Provisioning state of the application gateway resource. Possible
values are: 'Updating', 'Deleting', and 'Failed'.
returned: always
type: str
sample: null
custom_error_configurations:
description:
- Custom error configurations of the application gateway resource.
returned: always
type: dict
sample: null
contains:
status_code:
description:
- Status code of the application gateway customer error.
returned: always
type: str
sample: null
custom_error_page_url:
description:
- Error page URL of the application gateway customer error.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
returned: always
type: str
sample: null
zones:
description:
- >-
A list of availability zones denoting where the resource needs to
come from.
returned: always
type: str
sample: null
identity:
description:
- 'The identity of the application gateway, if configured.'
returned: always
type: dict
sample: null
contains:
principal_id:
description:
- >-
The principal id of the system assigned identity. This
property will only be provided for a system assigned identity.
returned: always
type: str
sample: null
tenant_id:
description:
- >-
The tenant id of the system assigned identity. This property
will only be provided for a system assigned identity.
returned: always
type: str
sample: null
type:
description:
- >-
The type of identity used for the resource. The type
'SystemAssigned, UserAssigned' includes both an implicitly
created identity and a set of user assigned identities. The
type 'None' will remove any identities from the virtual
machine.
returned: always
type: str
sample: null
user_assigned_identities:
description:
- >-
The list of user identities associated with resource. The user
identity dictionary key references will be ARM resource ids in
the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
returned: always
type: >-
unknown[DictionaryType
{"$id":"3152","$type":"DictionaryType","valueType":{"$ref":"3109"},"supportsAdditionalProperties":false,"name":{"$id":"3153","fixed":false},"deprecated":false}]
sample: null
provisioning_state:
description:
- Provisioning state of the WebApplicationFirewallPolicy.
returned: always
type: str
sample: null
resource_state:
description:
- Resource status of the policy.
returned: always
type: str
sample: null
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the actions the module may take on the resource."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMWebApplicationFirewallPolicies(AzureRMModuleBaseExt):
    """Manage an Azure Application Gateway Web Application Firewall policy.

    Creates, updates or deletes a WebApplicationFirewallPolicy resource
    through the generic Azure REST client (api-version 2019-06-01).

    Fixes over the generated original:
    - ``required=true`` used the undefined lowercase name ``true`` (NameError
      at import time); corrected to ``True``.
    - ``type='number'`` / ``type='boolean'`` are not valid Ansible argument
      types; corrected to ``'int'`` / ``'bool'``.
    - ``get_resource`` now parses the response body so callers can index it
      like a dict (``response['id']``) and compare it against ``self.body``.
    """

    def __init__(self):
        # Option spec; 'disposition' maps each option onto its location in
        # the REST request body ('/' means the top level of the payload).
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                updatable=False,
                disposition='resourceGroupName',
                required=True
            ),
            name=dict(
                type='str',
                updatable=False,
                disposition='policyName',
                required=True
            ),
            id=dict(
                type='str',
                updatable=False,
                disposition='/'
            ),
            location=dict(
                type='str',
                updatable=False,
                disposition='/'
            ),
            policy_settings=dict(
                type='dict',
                disposition='/properties/policySettings',
                options=dict(
                    enabled_state=dict(
                        type='str',
                        disposition='enabledState',
                        choices=['Disabled',
                                 'Enabled']
                    ),
                    mode=dict(
                        type='str',
                        choices=['Prevention',
                                 'Detection']
                    )
                )
            ),
            custom_rules=dict(
                type='list',
                disposition='/properties/customRules',
                options=dict(
                    name=dict(
                        type='str'
                    ),
                    priority=dict(
                        # was type='number' -- not a valid Ansible type
                        type='int',
                        required=True
                    ),
                    rule_type=dict(
                        type='str',
                        disposition='ruleType',
                        choices=['MatchRule',
                                 'Invalid'],
                        required=True
                    ),
                    match_conditions=dict(
                        type='list',
                        disposition='matchConditions',
                        required=True,
                        options=dict(
                            match_variables=dict(
                                type='list',
                                disposition='matchVariables',
                                required=True,
                                options=dict(
                                    variable_name=dict(
                                        type='str',
                                        disposition='variableName',
                                        choices=['RemoteAddr',
                                                 'RequestMethod',
                                                 'QueryString',
                                                 'PostArgs',
                                                 'RequestUri',
                                                 'RequestHeaders',
                                                 'RequestBody',
                                                 'RequestCookies'],
                                        required=True
                                    ),
                                    selector=dict(
                                        type='str'
                                    )
                                )
                            ),
                            operator=dict(
                                type='str',
                                choices=['IPMatch',
                                         'Equal',
                                         'Contains',
                                         'LessThan',
                                         'GreaterThan',
                                         'LessThanOrEqual',
                                         'GreaterThanOrEqual',
                                         'BeginsWith',
                                         'EndsWith',
                                         'Regex'],
                                required=True
                            ),
                            # NOTE: the 'Conditon' spelling mirrors the Azure
                            # REST API field name -- do not "fix" it here.
                            negation_conditon=dict(
                                # was type='boolean' -- not a valid Ansible type
                                type='bool',
                                disposition='negationConditon'
                            ),
                            match_values=dict(
                                type='list',
                                disposition='matchValues',
                                required=True
                            ),
                            transforms=dict(
                                type='list',
                                choices=['Lowercase',
                                         'Trim',
                                         'UrlDecode',
                                         'UrlEncode',
                                         'RemoveNulls',
                                         'HtmlEntityDecode']
                            )
                        )
                    ),
                    action=dict(
                        type='str',
                        choices=['Allow',
                                 'Block',
                                 'Log'],
                        required=True
                    )
                )
            ),
            etag=dict(
                type='str',
                updatable=False,
                disposition='/'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.name = None
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction

        self.body = {}
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2019-06-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMWebApplicationFirewallPolicies, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                                    supports_check_mode=True,
                                                                    supports_tags=True)

    def exec_module(self, **kwargs):
        """Entry point: decide which action is needed and apply it.

        Returns the module result dict (``changed`` plus the resource facts).
        """
        # Split incoming parameters between direct attributes and the
        # request body, as driven by the argument spec.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        resource_group = self.get_resource_group(self.resource_group)

        # Default the location to the resource group's location.
        if 'location' not in self.body:
            self.body['location'] = resource_group.location

        self.url = ('/subscriptions' +
                    '/{{ subscription_id }}' +
                    '/resourceGroups' +
                    '/{{ resource_group }}' +
                    '/providers' +
                    '/Microsoft.Network' +
                    '/ApplicationGatewayWebApplicationFirewallPolicies' +
                    '/{{ application_gateway_web_application_firewall_policy_name }}')
        self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
        self.url = self.url.replace('{{ resource_group }}', self.resource_group)
        self.url = self.url.replace('{{ application_gateway_web_application_firewall_policy_name }}', self.name)

        old_response = self.get_resource()

        if not old_response:
            self.log("WebApplicationFirewallPolicy instance doesn't exist")

            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('WebApplicationFirewallPolicy instance already exists')

            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                modifiers = {}
                # Build the compare modifiers once (the original built them
                # twice in a row).
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers
                self.results['compare'] = []
                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Need to Create / Update the WebApplicationFirewallPolicy instance')

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_resource()
            self.results['changed'] = True
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('WebApplicationFirewallPolicy instance deleted')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_resource()

            # Make sure the instance is actually gone: some Azure resources
            # linger for a while after the DELETE call returns.
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('WebApplicationFirewallPolicy instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["name"] = response["name"]
            self.results["type"] = response["type"]
            self.results["location"] = response["location"]
            self.results["tags"] = response["tags"]
            self.results["properties"] = response["properties"]
            self.results["etag"] = response["etag"]

        return self.results

    def create_update_resource(self):
        """PUT the policy and return the parsed response body."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as exc:
            self.log('Error attempting to create the WebApplicationFirewallPolicy instance.')
            self.fail('Error creating the WebApplicationFirewallPolicy instance: {0}'.format(str(exc)))

        try:
            response = json.loads(response.text)
        except Exception:
            # Non-JSON body: surface the raw text instead of crashing.
            response = {'text': response.text}

        return response

    def delete_resource(self):
        """DELETE the policy; returns True, or fails the module on error."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'DELETE',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as e:
            self.log('Error attempting to delete the WebApplicationFirewallPolicy instance.')
            self.fail('Error deleting the WebApplicationFirewallPolicy instance: {0}'.format(str(e)))

        return True

    def get_resource(self):
        """GET the policy; return the parsed body dict, or False if absent."""
        found = False
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            found = True
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Did not find the WebApplicationFirewallPolicy instance.')
        if found is True:
            # Parse the body so callers (exec_module, default_compare) can
            # treat the result as a dict; the original returned the raw
            # response object, which cannot be indexed with response["id"].
            try:
                return json.loads(response.text)
            except Exception:
                return {'text': response.text}
        return False
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMWebApplicationFirewallPolicies()
if __name__ == '__main__':
main()
| 33.380406
| 292
| 0.473956
| 6,054
| 75,640
| 5.865378
| 0.079947
| 0.040609
| 0.079585
| 0.057957
| 0.841843
| 0.811597
| 0.77907
| 0.748655
| 0.699034
| 0.677293
| 0
| 0.003667
| 0.469976
| 75,640
| 2,265
| 293
| 33.395143
| 0.882043
| 0.010444
| 0
| 0.874034
| 0
| 0.011824
| 0.839417
| 0.04302
| 0
| 0
| 0
| 0
| 0.000455
| 1
| 0.002729
| false
| 0.00091
| 0.004093
| 0
| 0.010914
| 0.000455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2a90973eb8155f265b40045ab40cb913e667c78f
| 9,219
|
py
|
Python
|
tests/test_cli.py
|
jonasjancarik/waybackpy
|
5a7fc7d5683759f759e3729c070bc1cab9376792
|
[
"MIT"
] | 185
|
2020-05-04T11:12:43.000Z
|
2022-03-26T02:25:56.000Z
|
tests/test_cli.py
|
jonasjancarik/waybackpy
|
5a7fc7d5683759f759e3729c070bc1cab9376792
|
[
"MIT"
] | 111
|
2020-05-04T15:40:38.000Z
|
2022-03-03T22:30:00.000Z
|
tests/test_cli.py
|
jonasjancarik/waybackpy
|
5a7fc7d5683759f759e3729c070bc1cab9376792
|
[
"MIT"
] | 28
|
2020-05-05T15:04:22.000Z
|
2022-03-05T15:11:50.000Z
|
import sys
import os
import pytest
import random
import string
import argparse
import waybackpy.cli as cli
from waybackpy.wrapper import Url # noqa: E402
from waybackpy.__version__ import __version__
def test_save():
    """CLI --save on a bogus domain should report a helpful error.

    Bug fixed: the original assertion was
    ``assert "could happen..." or "cannot be archived..." in str(reply)`` --
    the non-empty string literal on the left is truthy, so the assert could
    never fail.  The membership test must apply to each alternative.
    """
    args = argparse.Namespace(
        user_agent=None,
        url="https://hfjfjfjfyu6r6rfjvj.fjhgjhfjgvjm",
        total=False,
        version=False,
        file=False,
        oldest=False,
        save=True,
        json=False,
        archive_url=False,
        newest=False,
        near=False,
        subdomain=False,
        known_urls=False,
        get=None,
    )
    reply = cli.args_handler(args)
    assert (
        "could happen because either your waybackpy" in str(reply)
        or "cannot be archived by wayback machine as it is a redirect" in str(reply)
    )
def test_json():
    """--json should surface the raw availability-API payload."""
    namespace_kwargs = dict(
        user_agent=None,
        url="https://pypi.org/user/akamhy/",
        total=False,
        version=False,
        file=False,
        oldest=False,
        save=False,
        json=True,
        archive_url=False,
        newest=False,
        near=False,
        subdomain=False,
        known_urls=False,
        get=None,
    )
    result = cli.args_handler(argparse.Namespace(**namespace_kwargs))
    assert "archived_snapshots" in str(result)
def test_archive_url():
    """--archive_url should yield a web.archive.org snapshot link."""
    flags = {name: False for name in (
        "total", "version", "file", "oldest", "save", "json",
        "newest", "near", "subdomain", "known_urls",
    )}
    ns = argparse.Namespace(
        user_agent=None,
        url="https://pypi.org/user/akamhy/",
        archive_url=True,
        get=None,
        **flags,
    )
    out = cli.args_handler(ns)
    assert "https://web.archive.org/web/" in str(out)
def test_oldest():
    """--oldest finds a snapshot for a real URL and errors for a bogus one."""
    base = dict(
        user_agent=None,
        total=False,
        version=False,
        file=False,
        oldest=True,
        save=False,
        json=False,
        archive_url=False,
        newest=False,
        near=False,
        subdomain=False,
        known_urls=False,
        get=None,
    )

    out = cli.args_handler(
        argparse.Namespace(url="https://pypi.org/user/akamhy/", **base)
    )
    assert "pypi.org/user/akamhy" in str(out)

    # A random suffix makes the URL almost certainly unarchived.
    suffix = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
    )
    out = cli.args_handler(
        argparse.Namespace(
            url="https://pypi.org/yfvjvycyc667r67ed67r" + suffix, **base
        )
    )
    assert "Can not find archive for" in str(out)
def test_newest():
    """--newest finds a snapshot for a real URL and errors for a bogus one."""
    ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
        (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9"
    base = dict(
        total=False,
        version=False,
        file=False,
        oldest=False,
        save=False,
        json=False,
        archive_url=False,
        newest=True,
        near=False,
        subdomain=False,
        known_urls=False,
        get=None,
    )

    out = cli.args_handler(
        argparse.Namespace(
            user_agent=ua, url="https://pypi.org/user/akamhy/", **base
        )
    )
    assert "pypi.org/user/akamhy" in str(out)

    # A random suffix makes the URL almost certainly unarchived.
    suffix = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
    )
    out = cli.args_handler(
        argparse.Namespace(
            user_agent=None,
            url="https://pypi.org/yfvjvycyc667r67ed67r" + suffix,
            **base,
        )
    )
    assert "Can not find archive for" in str(out)
def test_total_archives():
    """--total should return the snapshot count as an integer."""
    ns = argparse.Namespace(
        user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
        (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
        url="https://pypi.org/user/akamhy/",
        total=True,
        version=False,
        file=False,
        oldest=False,
        save=False,
        json=False,
        archive_url=False,
        newest=False,
        near=False,
        subdomain=False,
        known_urls=False,
        get=None,
    )
    count = cli.args_handler(ns)
    assert isinstance(count, int)
def test_known_urls():
    """--known_urls with --file should list URLs for the domain."""
    flags = {name: False for name in (
        "total", "version", "oldest", "save", "json",
        "archive_url", "newest", "near", "subdomain",
    )}
    ns = argparse.Namespace(
        user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
        (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
        url="https://www.keybr.com",
        file=True,
        known_urls=True,
        get=None,
        **flags,
    )
    listing = cli.args_handler(ns)
    assert "keybr" in str(listing)
def test_near():
    """--near picks a snapshot close to the timestamp; bogus URLs error."""
    shared = dict(
        total=False,
        version=False,
        file=False,
        oldest=False,
        save=False,
        json=False,
        archive_url=False,
        newest=False,
        near=True,
        subdomain=False,
        known_urls=False,
        get=None,
        year=2020,
        month=7,
        day=15,
        hour=1,
        minute=1,
    )

    reply = cli.args_handler(
        argparse.Namespace(
            user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
        (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
            url="https://pypi.org/user/akamhy/",
            **shared,
        )
    )
    assert "202007" in str(reply)

    # A random suffix makes the URL almost certainly unarchived.
    tail = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
    )
    reply = cli.args_handler(
        argparse.Namespace(
            user_agent=None,
            url="https://pypi.org/yfvjvycyc667r67ed67r" + tail,
            **shared,
        )
    )
    assert "Can not find archive for" in str(reply)
def test_get():
    """`--get` fetches page source for url/oldest/newest; an unrecognised
    value falls back to the usage hint mentioning the source code."""
    ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9"
    flags = {
        name: False
        for name in (
            "total", "version", "file", "oldest", "save", "json",
            "archive_url", "newest", "near", "subdomain", "known_urls",
        )
    }
    cases = [
        ("https://github.com/akamhy", "url", "waybackpy"),
        ("https://github.com/akamhy/waybackpy", "oldest", "waybackpy"),
        ("https://akamhy.github.io/waybackpy/", "newest", "waybackpy"),
        # Unknown `get` value: CLI explains how to get the source code instead.
        ("https://pypi.org/user/akamhy/", "foobar", "get the source code of the"),
    ]
    for url, get, expected in cases:
        args = argparse.Namespace(user_agent=ua, url=url, get=get, **flags)
        reply = cli.args_handler(args)
        assert expected in str(reply)
def test_args_handler():
    """With `--version` (or no URL at all) the handler reports the version."""
    version_args = argparse.Namespace(version=True)
    assert ("waybackpy version %s" % (__version__)) == cli.args_handler(version_args)

    no_url_args = argparse.Namespace(url=None, version=False)
    assert ("waybackpy %s" % (__version__)) in str(cli.args_handler(no_url_args))
def test_main():
    """Smoke-test cli.main; this also exercises the parse_args method in cli.py."""
    argv = ["temp.py", "--version"]
    cli.main(argv)
| 25.608333
| 132
| 0.579347
| 1,141
| 9,219
| 4.57844
| 0.111306
| 0.037902
| 0.051685
| 0.06183
| 0.869832
| 0.845329
| 0.839778
| 0.82523
| 0.810299
| 0.801493
| 0
| 0.033255
| 0.305239
| 9,219
| 359
| 133
| 25.679666
| 0.782358
| 0.006291
| 0
| 0.801223
| 0
| 0.024465
| 0.093798
| 0
| 0
| 0
| 0
| 0
| 0.051988
| 1
| 0.033639
| false
| 0
| 0.027523
| 0
| 0.061162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aaa2df0a82e6b1626b1249daf60535eed0031d30
| 511
|
py
|
Python
|
plugins/konsole/restart.py
|
nielsvm/toggle-desktop
|
2f060d03ad1b36c8e01e43c012fc877ba1bd9f0c
|
[
"BSD-3-Clause"
] | 1
|
2018-07-23T07:42:40.000Z
|
2018-07-23T07:42:40.000Z
|
plugins/konsole/restart.py
|
nielsvm/toggle-desktop
|
2f060d03ad1b36c8e01e43c012fc877ba1bd9f0c
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/konsole/restart.py
|
nielsvm/toggle-desktop
|
2f060d03ad1b36c8e01e43c012fc877ba1bd9f0c
|
[
"BSD-3-Clause"
] | 1
|
2015-03-17T22:46:09.000Z
|
2015-03-17T22:46:09.000Z
|
from core import kde
class Restart4(kde.KDE4Action):
    """Restart Konsole, open terminal instances will get killed."""
    # NOTE(review): the docstring presumably doubles as the user-visible
    # action description in the plugin UI — confirm before rewording it.
    def binary_dependencies(self):
        # Executables that must be installed for this action to be usable.
        return ['konsole']
    def execute(self):
        # Delegate to the KDE4 helper: (DBus/service name, process name).
        return kde.restart('org.kde.konsole', 'konsole')
class Restart5(kde.KDE5Action):
    """Restart Konsole, open terminal instances will get killed."""
    # NOTE(review): KDE5 twin of Restart4; the docstring presumably doubles
    # as the user-visible action description — confirm before rewording it.
    def binary_dependencies(self):
        # Executables that must be installed for this action to be usable.
        return ['konsole']
    def execute(self):
        # Delegate to the KDE5 helper: (DBus/service name, process name).
        return kde.restart('org.kde.konsole', 'konsole')
| 25.55
| 67
| 0.671233
| 60
| 511
| 5.683333
| 0.4
| 0.117302
| 0.105572
| 0.152493
| 0.797654
| 0.797654
| 0.797654
| 0.797654
| 0.797654
| 0.797654
| 0
| 0.009828
| 0.203523
| 511
| 19
| 68
| 26.894737
| 0.82801
| 0.225049
| 0
| 0.727273
| 0
| 0
| 0.150649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0.363636
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
aaa88e65ca1c14ffe207377e5273624aa9691aaa
| 2,185
|
py
|
Python
|
tfprob/gan/loss.py
|
yaojia1/AttGAN-final
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 581
|
2018-05-06T05:15:05.000Z
|
2022-03-29T08:13:54.000Z
|
tfprob/gan/loss.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 52
|
2018-05-11T09:33:30.000Z
|
2022-03-24T04:27:07.000Z
|
tfprob/gan/loss.py
|
yaojia1/darknet_my
|
92906e6b32cdcabaa841461c6d2efe06a54057d1
|
[
"MIT"
] | 137
|
2018-05-08T14:30:03.000Z
|
2022-02-24T01:50:37.000Z
|
import tensorflow as tf
def get_gan_losses_fn():
    """Vanilla GAN losses: binary cross-entropy on raw (from_logits) outputs."""
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def d_loss_fn(r_logit, f_logit):
        # Discriminator targets: 1 for real logits, 0 for fake logits.
        real_term = bce(tf.ones_like(r_logit), r_logit)
        fake_term = bce(tf.zeros_like(f_logit), f_logit)
        return real_term, fake_term

    def g_loss_fn(f_logit):
        # Generator tries to make the discriminator output 1 on fakes.
        return bce(tf.ones_like(f_logit), f_logit)

    return d_loss_fn, g_loss_fn
def get_hinge_v1_losses_fn():
    """Hinge losses (v1): the generator also uses the hinge form."""

    def d_loss_fn(r_logit, f_logit):
        # mean(max(0, 1 - r)) for real, mean(max(0, 1 + f)) for fake.
        real_term = tf.reduce_mean(tf.maximum(1 - r_logit, 0))
        fake_term = tf.reduce_mean(tf.maximum(1 + f_logit, 0))
        return real_term, fake_term

    def g_loss_fn(f_logit):
        return tf.reduce_mean(tf.maximum(1 - f_logit, 0))

    return d_loss_fn, g_loss_fn
def get_hinge_v2_losses_fn():
    """Hinge losses (v2): hinge discriminator, linear (-logit) generator."""

    def d_loss_fn(r_logit, f_logit):
        real_term = tf.reduce_mean(tf.maximum(1 - r_logit, 0))
        fake_term = tf.reduce_mean(tf.maximum(1 + f_logit, 0))
        return real_term, fake_term

    def g_loss_fn(f_logit):
        # Generator maximises the fake logit directly.
        return tf.reduce_mean(- f_logit)

    return d_loss_fn, g_loss_fn
def get_lsgan_losses_fn():
    """Least-squares GAN losses: MSE against 1 (real) / 0 (fake) targets."""
    mse = tf.keras.losses.MeanSquaredError()

    def d_loss_fn(r_logit, f_logit):
        real_term = mse(tf.ones_like(r_logit), r_logit)
        fake_term = mse(tf.zeros_like(f_logit), f_logit)
        return real_term, fake_term

    def g_loss_fn(f_logit):
        return mse(tf.ones_like(f_logit), f_logit)

    return d_loss_fn, g_loss_fn
def get_wgan_losses_fn():
    """Wasserstein GAN critic/generator losses (no clipping/penalty here)."""

    def d_loss_fn(r_logit, f_logit):
        # Critic maximises E[r] - E[f]; written as two minimised terms.
        real_term = - tf.reduce_mean(r_logit)
        fake_term = tf.reduce_mean(f_logit)
        return real_term, fake_term

    def g_loss_fn(f_logit):
        return - tf.reduce_mean(f_logit)

    return d_loss_fn, g_loss_fn
def get_adversarial_losses_fn(mode):
    """Return the (d_loss_fn, g_loss_fn) pair for *mode*.

    Supported modes: 'gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'.

    Raises:
        ValueError: if *mode* is not a supported name. (Previously an
        unknown mode silently returned None, which only surfaced later as
        an opaque unpacking TypeError at the call site.)
    """
    if mode == 'gan':
        return get_gan_losses_fn()
    elif mode == 'hinge_v1':
        return get_hinge_v1_losses_fn()
    elif mode == 'hinge_v2':
        return get_hinge_v2_losses_fn()
    elif mode == 'lsgan':
        return get_lsgan_losses_fn()
    elif mode == 'wgan':
        return get_wgan_losses_fn()
    raise ValueError("unknown adversarial loss mode: %r" % (mode,))
| 26.011905
| 62
| 0.654462
| 382
| 2,185
| 3.32199
| 0.10733
| 0.113475
| 0.055162
| 0.113475
| 0.79275
| 0.720252
| 0.720252
| 0.720252
| 0.720252
| 0.660362
| 0
| 0.009715
| 0.246224
| 2,185
| 83
| 63
| 26.325301
| 0.760777
| 0
| 0
| 0.491525
| 0
| 0
| 0.012815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.271186
| false
| 0
| 0.016949
| 0
| 0.627119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
2a9a0c3aed8b8d54addadc51f3bb2048e7e2f09f
| 6,557
|
py
|
Python
|
src/datalayer/tests/test_calculate_qc.py
|
Dabble-of-DevOps-Bio/ella
|
e38631d302611a143c9baaa684bcbd014d9734e4
|
[
"MIT"
] | null | null | null |
src/datalayer/tests/test_calculate_qc.py
|
Dabble-of-DevOps-Bio/ella
|
e38631d302611a143c9baaa684bcbd014d9734e4
|
[
"MIT"
] | null | null | null |
src/datalayer/tests/test_calculate_qc.py
|
Dabble-of-DevOps-Bio/ella
|
e38631d302611a143c9baaa684bcbd014d9734e4
|
[
"MIT"
] | null | null | null |
import copy
import pytest
from datalayer.alleledataloader.calculate_qc import genotype_calculate_qc
# Baseline SNP allele (G -> A) returned (deep-copied) by the `allele` fixture.
DEFAULT_ALLELE = {"vcf_ref": "G", "vcf_alt": "A", "change_type": "SNP"}
# Baseline passing heterozygous HTS genotype returned by the `genotype`
# fixture; individual tests mutate their copy to hit specific QC branches.
DEFAULT_GENOTYPE = {
    "sequencing_depth": 400,
    "multiallelic": False,
    "filter_status": "PASS",
    "allele_depth": {"A": 100, "REF": 100},
    "type": "Heterozygous",
}
@pytest.fixture
def genotype():
    # Deep copy so each test can mutate nested dicts without leaking state.
    return copy.deepcopy(DEFAULT_GENOTYPE)
@pytest.fixture
def allele():
    # Deep copy so each test can mutate the allele without leaking state.
    return copy.deepcopy(DEFAULT_ALLELE)
def test_allele_ratio_no_data(allele, genotype):
    """Without any alt-allele depth there must be no allele_ratio key."""
    for depths in ({}, {"REF": 100}):
        genotype["allele_depth"] = depths
        result = genotype_calculate_qc(allele, genotype, "HTS")
        assert "allele_ratio" not in result
def test_allele_ratio_wrong_data_types(allele, genotype):
    """A None depth, or an alt allele absent from allele_depth, gives no ratio."""
    genotype["allele_depth"] = {"A": None, "REF": 100}
    result = genotype_calculate_qc(allele, genotype, "HTS")
    assert "allele_ratio" not in result

    allele["vcf_alt"] = "G"  # vcf_alt not in allele_depth
    genotype["allele_depth"] = {"A": 100, "REF": 100}
    result = genotype_calculate_qc(allele, genotype, "HTS")
    assert "allele_ratio" not in result
def test_allele_ratio_correct_calculations(allele, genotype):
    """allele_ratio is alt depth over total depth; all-zero depths give none."""
    # Only zeros should give no ratio
    genotype["allele_depth"] = {"A": 0, "REF": 0}
    assert "allele_ratio" not in genotype_calculate_qc(allele, genotype, "HTS")

    for depths, expected_ratio in [({"A": 100, "REF": 100}, 0.5), ({"A": 100, "REF": 0}, 1)]:
        genotype["allele_depth"] = depths
        assert genotype_calculate_qc(allele, genotype, "HTS")["allele_ratio"] == expected_ratio
def test_needs_verification_checks_no_data(allele, genotype):
    """With every QC input missing, all checks fail and HTS data still
    needs verification."""
    allele["change_type"] = None
    for key in ("allele_depth", "sequencing_depth", "filter_status"):
        genotype[key] = None
    result = genotype_calculate_qc(allele, genotype, "HTS")
    checks = result["needs_verification_checks"]
    expected = {
        "snp": False,
        "pass": False,
        "dp": False,
        "allele_ratio": False,
        "hts": True,
    }
    for name, flag in expected.items():
        assert checks[name] is flag
    assert result["needs_verification"] is True
def test_needs_verification_positive(allele, genotype):
    """A good SNP call (PASS, depth 20, sane ratio) passes every check,
    so no verification is needed — for both genotype types."""

    def run_case(gt_type, depths):
        allele["change_type"] = "SNP"
        genotype["type"] = gt_type
        genotype["allele_depth"] = depths
        genotype["sequencing_depth"] = 20
        genotype["filter_status"] = "PASS"
        result = genotype_calculate_qc(allele, genotype, "HTS")
        checks = result["needs_verification_checks"]
        for name in ("snp", "pass", "dp", "allele_ratio", "hts"):
            assert checks[name] is True
        assert result["needs_verification"] is False

    # Heterozygous case
    run_case("Heterozygous", {"A": 70, "REF": 50})
    # Homozygous case
    run_case("Homozygous", {"A": 100, "REF": 10})
def test_needs_verification_hts_negative(allele, genotype):
    """Sanger samples never need verification, even when all checks fail."""
    # Fail all checks, but sample type is Sanger
    allele["change_type"] = None
    for key in ("allele_depth", "sequencing_depth", "filter_status"):
        genotype[key] = None
    result = genotype_calculate_qc(allele, genotype, "Sanger")
    checks = result["needs_verification_checks"]
    for name in ("snp", "pass", "dp", "allele_ratio", "hts"):
        assert checks[name] is False
    assert result["needs_verification"] is False
def test_needs_verification_negative(allele, genotype):
    """HTS calls that fail any (or all) checks must need verification."""
    cases = [
        # (change_type, genotype type, allele_depth, depth, filter, expected flags)
        # Heterozygous case: everything fails.
        ("indel", "Heterozygous", {"A": 100, "REF": 1}, 19, "FAIL",
         {"snp": False, "pass": False, "dp": False, "allele_ratio": False}),
        # Homozygous case: everything fails.
        ("del", "Homozygous", {"A": 50, "REF": 50}, 0, "something",
         {"snp": False, "pass": False, "dp": False, "allele_ratio": False}),
        # One criteria fails: sequencing depth 19 is below threshold.
        ("SNP", "Homozygous", {"A": 100, "REF": 10}, 19, "PASS",
         {"snp": True, "pass": True, "dp": False, "allele_ratio": True}),
    ]
    for change_type, gt_type, depths, depth, status, expected in cases:
        allele["change_type"] = change_type
        genotype["type"] = gt_type
        genotype["allele_depth"] = depths
        genotype["sequencing_depth"] = depth
        genotype["filter_status"] = status
        result = genotype_calculate_qc(allele, genotype, "HTS")
        checks = result["needs_verification_checks"]
        for name, flag in expected.items():
            assert checks[name] is flag
        assert checks["hts"] is True
        assert result["needs_verification"] is True
| 36.427778
| 80
| 0.724112
| 805
| 6,557
| 5.617391
| 0.099379
| 0.225564
| 0.254312
| 0.224458
| 0.833481
| 0.802742
| 0.791022
| 0.739054
| 0.731756
| 0.708315
| 0
| 0.012158
| 0.159524
| 6,557
| 179
| 81
| 36.631285
| 0.808383
| 0.029587
| 0
| 0.6875
| 0
| 0
| 0.207776
| 0.027546
| 0
| 0
| 0
| 0
| 0.382813
| 1
| 0.070313
| false
| 0.085938
| 0.023438
| 0.015625
| 0.109375
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
2ab07ed929778d4fa263b0ee560ec7abc5d4f60a
| 162
|
py
|
Python
|
rsync.py
|
TuranSKT/detectron2_class
|
c90e68abbd39afa8c34d83ac760cabf3b5d02868
|
[
"MIT"
] | null | null | null |
rsync.py
|
TuranSKT/detectron2_class
|
c90e68abbd39afa8c34d83ac760cabf3b5d02868
|
[
"MIT"
] | null | null | null |
rsync.py
|
TuranSKT/detectron2_class
|
c90e68abbd39afa8c34d83ac760cabf3b5d02868
|
[
"MIT"
] | null | null | null |
import os
# One-shot push of every rendered video to the remote collection host.
# rsync flags: -a archive (perms/times), -r recurse, -z compress in transit.
# NOTE(review): host, user and paths are hard-coded; the shell expands the glob.
os.system("rsync -arz /home/pi/yoli/video_output/* abdullah@35.210.50.168:/home/abdullah/yoli/video")
# Local cleanup is deliberately left disabled.
#os.system("rm -r /home/pi/yoli/video_output/*")
| 40.5
| 101
| 0.728395
| 29
| 162
| 4
| 0.586207
| 0.232759
| 0.172414
| 0.258621
| 0.362069
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 0.061728
| 162
| 3
| 102
| 54
| 0.697368
| 0.290123
| 0
| 0
| 0
| 0.5
| 0.77193
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
2af1e4e8f77b7a4e3bf176ba6b9350a414254154
| 192
|
py
|
Python
|
flixster/__init__.py
|
jaebradley/flixster
|
f903e5915c74c59eb14063d9257830449f2ede7c
|
[
"MIT"
] | 3
|
2017-06-05T20:17:22.000Z
|
2018-04-20T00:32:39.000Z
|
flixster/__init__.py
|
jaebradley/flixster
|
f903e5915c74c59eb14063d9257830449f2ede7c
|
[
"MIT"
] | 3
|
2017-06-05T12:39:05.000Z
|
2021-06-01T21:48:17.000Z
|
flixster/__init__.py
|
jaebradley/flixster
|
f903e5915c74c59eb14063d9257830449f2ede7c
|
[
"MIT"
] | 1
|
2019-04-11T19:26:51.000Z
|
2019-04-11T19:26:51.000Z
|
from flixster.client import FlixsterClient
from flixster.query import TheaterInformationQuery, SearchType
from flixster.query.parameter_builders import TheaterInformationQueryParameterBuilder
| 48
| 85
| 0.90625
| 18
| 192
| 9.611111
| 0.611111
| 0.208092
| 0.196532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067708
| 192
| 3
| 86
| 64
| 0.96648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6330bee069281f3076bc0e39091217415529e21b
| 398
|
py
|
Python
|
CodeChef/Easy/A_Very_Easy_Problem.py
|
vijay-jaisankar/Competetive_programming
|
860c165ce42337a7380112938b14772e6a553647
|
[
"MIT"
] | null | null | null |
CodeChef/Easy/A_Very_Easy_Problem.py
|
vijay-jaisankar/Competetive_programming
|
860c165ce42337a7380112938b14772e6a553647
|
[
"MIT"
] | null | null | null |
CodeChef/Easy/A_Very_Easy_Problem.py
|
vijay-jaisankar/Competetive_programming
|
860c165ce42337a7380112938b14772e6a553647
|
[
"MIT"
] | null | null | null |
# cook your dish here
# Pre-computed answers for the judge, emitted one per line.
answers = (
    "137=2(2(2)+2+2(0))+2(2+2(0))+2(0)",
    "1315=2(2(2+2(0))+2)+2(2(2+2(0)))+2(2(2)+2(0))+2(2(2)+2(0))+2+2(0)",
    "73=2(2(2)+2)+2(2+2(0))+2(0)",
    "136=2(2(2)+2+2(0))+2(2+2(0))",
    "255=2(2(2)+2+2(0))+2(2(2)+2)+2(2(2)+2(0))+2(2(2))+2(2+2(0))+2(2)+2+2(0)",
    "1384=2(2(2+2(0))+2)+2(2(2+2(0)))+2(2(2)+2)+2(2(2)+2(0))+2(2+2(0))",
    "16385=2(2(2+2(0))+2(2)+2)+2(0)",
)
for line in answers:
    print(line)
| 44.222222
| 80
| 0.469849
| 130
| 398
| 1.438462
| 0.107692
| 0.748663
| 0.818182
| 0.705882
| 0.727273
| 0.700535
| 0.700535
| 0.620321
| 0.620321
| 0.572193
| 0
| 0.352332
| 0.030151
| 398
| 8
| 81
| 49.75
| 0.132124
| 0.047739
| 0
| 0
| 0
| 1
| 0.811671
| 0.811671
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 14
|
932bfe5a7828eefabe5801069ffab6aa2d2370c0
| 1,025
|
py
|
Python
|
wd40/dict_convert.py
|
pepitooo/python-wd40
|
777c1b456e0b6041ef9c107c61151089d64d7158
|
[
"Apache-2.0"
] | null | null | null |
wd40/dict_convert.py
|
pepitooo/python-wd40
|
777c1b456e0b6041ef9c107c61151089d64d7158
|
[
"Apache-2.0"
] | null | null | null |
wd40/dict_convert.py
|
pepitooo/python-wd40
|
777c1b456e0b6041ef9c107c61151089d64d7158
|
[
"Apache-2.0"
] | null | null | null |
class DictToObj(object):
    """Recursively expose a dict's entries as object attributes.

    Bytes keys are decoded so they become valid attribute names; nested
    dicts become DictToObj instances, and list/tuple values are rebuilt as
    lists with their dict elements converted element-wise.
    """

    def __init__(self, d):
        for key, value in d.items():
            # Decode bytes keys; everything else is used as-is.
            name = key.decode() if type(key) is bytes else key
            if isinstance(value, (list, tuple)):
                converted = [
                    DictToObj(item) if isinstance(item, dict) else item
                    for item in value
                ]
            elif isinstance(value, dict):
                converted = DictToObj(value)
            else:
                converted = value
            setattr(self, name, converted)
class DictToObjJson(object):
    """Transform a dict into an object, decoding bytes keys AND values.

    Unlike DictToObj, bytes *values* are decoded to str as well. Bug fix:
    nested dicts previously recursed through DictToObj, so bytes inside
    nested structures were left undecoded — recursion now stays in
    DictToObjJson so the whole tree is JSON-friendly.
    """

    def __init__(self, d):
        for a, b in d.items():
            # Bytes keys must be decoded to be usable attribute names.
            attr = a.decode() if type(a) is bytes else a
            if type(b) is bytes:
                b = b.decode()
            if isinstance(b, (list, tuple)):
                # Convert dict elements recursively; tuples become lists.
                setattr(self, attr, [DictToObjJson(x) if isinstance(x, dict) else x for x in b])
            else:
                setattr(self, attr, DictToObjJson(b) if isinstance(b, dict) else b)
| 32.03125
| 92
| 0.48
| 132
| 1,025
| 3.666667
| 0.212121
| 0.14876
| 0.107438
| 0.198347
| 0.892562
| 0.892562
| 0.892562
| 0.892562
| 0.892562
| 0.892562
| 0
| 0
| 0.405854
| 1,025
| 32
| 93
| 32.03125
| 0.794745
| 0.05561
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
933c6c84dd40c964d9d9ac585a453327d0e99dbb
| 2,433
|
py
|
Python
|
guiamuseos/museos/migrations/0002_auto_20180514_1300.py
|
rubengarciallorens/X-Serv-Practica-Museos
|
79ce23e20b6e997195895ab9230bdde9392ebf5d
|
[
"Apache-2.0"
] | null | null | null |
guiamuseos/museos/migrations/0002_auto_20180514_1300.py
|
rubengarciallorens/X-Serv-Practica-Museos
|
79ce23e20b6e997195895ab9230bdde9392ebf5d
|
[
"Apache-2.0"
] | null | null | null |
guiamuseos/museos/migrations/0002_auto_20180514_1300.py
|
rubengarciallorens/X-Serv-Practica-Museos
|
79ce23e20b6e997195895ab9230bdde9392ebf5d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-05-14 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give every free-text column an explicit 'DEFAULT_VALUE' default."""

    dependencies = [
        ('museos', '0001_initial'),
    ]

    # Every (model, field) pair below receives the identical TextField.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name=field_name,
            field=models.TextField(default='DEFAULT_VALUE'),
        )
        for model_name, field_name in [
            ('museo', 'accesibilidad'),
            ('museo', 'contacto'),
            ('museo', 'descripcion'),
            ('museo', 'descripcion_entidad'),
            ('museo', 'equipamiento'),
            ('museo', 'horario'),
            ('museo', 'identidad'),
            ('museo', 'localizacion'),
            ('museo', 'nombre'),
            ('museo', 'tipo'),
            ('museo', 'transporte'),
            ('museo', 'url'),
            ('post', 'post_text'),
        ]
    ]
| 30.037037
| 60
| 0.548705
| 205
| 2,433
| 6.346341
| 0.24878
| 0.199846
| 0.249808
| 0.289777
| 0.7794
| 0.7794
| 0.720215
| 0.720215
| 0.720215
| 0.720215
| 0
| 0.011664
| 0.330456
| 2,433
| 80
| 61
| 30.4125
| 0.786986
| 0.026716
| 0
| 0.69863
| 1
| 0
| 0.15814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027397
| 0
| 0.068493
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9348776a00f134a75e54018b0fda231348769666
| 2,776
|
py
|
Python
|
testUI/checkAppleImage.py
|
lukeliuli/phoneDev
|
46431470a49a994775710dd3346acb490f280aef
|
[
"Apache-2.0"
] | null | null | null |
testUI/checkAppleImage.py
|
lukeliuli/phoneDev
|
46431470a49a994775710dd3346acb490f280aef
|
[
"Apache-2.0"
] | null | null | null |
testUI/checkAppleImage.py
|
lukeliuli/phoneDev
|
46431470a49a994775710dd3346acb490f280aef
|
[
"Apache-2.0"
] | null | null | null |
from cvs import *
import numpy as np
import os,sys
def checkAppleImage():
    """Demo: load ./res/hfs1.jpg, show it, then segment red regions and
    display each detected apple with its area and perimeter.

    NOTE(review): `cvs`, `cv2` and `sleep` are expected to come from the
    `from cvs import *` at the top of this file.
    """
    strTmp = "./res/hfs1.jpg"
    im1 = cvs.imread(strTmp)
    # UI banner (Chinese): university/team name + "show original apple image".
    cvs.setLbs("长沙理工大学测控专业"+"苹果检测B116队"+"显示苹果原始图像")
    cvs.imshow(im1)
    sleep(1000)  # keep the original image visible (units presumably ms — confirm)
    gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    hsv1 = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
    # define range of red color in HSV
    lower_red = np.array([0,50,50])
    upper_red = np.array([20,255,255])
    # Threshold the HSV image to keep only the red range defined above
    mask = cv2.inRange(hsv1,lower_red, upper_red)
    im2,cnt, hierarchy= cv2.findContours(mask,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) # find contours
    n=len(cnt) # number of contours
    contoursImg=[]
    for i in range(n):
        length = cv2.arcLength(cnt[i], True) # contour perimeter
        area = cv2.contourArea(cnt[i]) # contour area
        # Skip regions too small to plausibly be an apple.
        if length <500 and area<500*500*0.1:
            continue
        # Build a grayscale-style mask image for this contour.
        tmp3=np.zeros(gray1.shape,np.uint8) # black background
        mask3=cv2.drawContours(tmp3,cnt,i,(255,255,255),-1) # draw contour as mask; -1 fills the interior
        # Graph cut (GrabCut) seeded by the contour mask.
        bgdModel = np.zeros((1,65),np.float64)
        fgdModel = np.zeros((1,65),np.float64)
        mask3[mask3 == 255] = 1
        mask4, bgdModel, fgdModel = cv2.grabCut(im1,mask3,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
        mask4[mask3 == 1] = 255
        # UI caption (Chinese): team banner + "extracted apple image" + area/perimeter.
        cvs.setLbs("长沙理工大学测控专业苹果检测B116队,"+"显示提取的苹果图像,"+"1.苹果面积是"+str(area)+" 2.苹果周长是"+str(length))
        cvs.imshow(mask4)
def analyzeAppleImage(im1):
    """Detect the first large red region (apple) in BGR image *im1*.

    Returns (contour_image, red_mask, grabcut_mask, info_string). The info
    string is Chinese: either area/perimeter of the detected apple, or the
    default "no apple (red region) detected" text when nothing qualifies.
    NOTE(review): `cv2` is expected from `from cvs import *` — confirm.
    """
    gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    hsv1 = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
    # define range of red color in HSV
    lower_red = np.array([0,50,50])
    upper_red = np.array([20,255,255])
    # Threshold the HSV image to keep only the red range defined above
    mask = cv2.inRange(hsv1,lower_red, upper_red)
    im2,cnt, hierarchy= cv2.findContours(mask,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) # find contours
    appleInfo = "没有检测到苹果(红色区域)"
    mask4 = mask
    n=len(cnt) # number of contours
    contoursImg=[]
    for i in range(n):
        length = cv2.arcLength(cnt[i], True) # contour perimeter
        area = cv2.contourArea(cnt[i]) # contour area
        # Skip regions too small to plausibly be an apple.
        if length <500 and area<500*500*0.1:
            continue
        # Build a grayscale-style mask image for this contour.
        tmp3=np.zeros(gray1.shape,np.uint8) # black background
        mask3=cv2.drawContours(tmp3,cnt,i,(255,255,255),-1) # draw contour as mask; -1 fills the interior
        # Graph cut (GrabCut) seeded by the contour mask.
        bgdModel = np.zeros((1,65),np.float64)
        fgdModel = np.zeros((1,65),np.float64)
        mask3[mask3 == 255] = 1
        mask4, bgdModel, fgdModel = cv2.grabCut(im1,mask3,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
        mask4[mask3 == 1] = 255
        appleInfo ="苹果面积是"+str(area)+"苹果周长是"+str(length)
        return im2,mask,mask4,appleInfo  # NOTE: returns on the FIRST large region only
    return im2,mask,mask4,appleInfo
| 33.445783
| 105
| 0.615274
| 388
| 2,776
| 4.340206
| 0.293814
| 0.021378
| 0.033254
| 0.04038
| 0.813539
| 0.781473
| 0.781473
| 0.781473
| 0.781473
| 0.781473
| 0
| 0.09396
| 0.248559
| 2,776
| 83
| 106
| 33.445783
| 0.713327
| 0.09402
| 0
| 0.736842
| 0
| 0
| 0.043687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
93657cdb897378f18e21542e407ed281bf5fae9c
| 15,140
|
py
|
Python
|
openprocurement/edge/tests/plans.py
|
kroman0/openprocurement.edge
|
e5ad9a3bbb64c10cf529e940ab0018061fcdc4f4
|
[
"Apache-2.0"
] | 3
|
2017-05-17T08:19:30.000Z
|
2017-12-14T07:24:36.000Z
|
openprocurement/edge/tests/plans.py
|
kroman0/openprocurement.edge
|
e5ad9a3bbb64c10cf529e940ab0018061fcdc4f4
|
[
"Apache-2.0"
] | 45
|
2016-10-19T13:34:53.000Z
|
2018-03-13T14:05:13.000Z
|
openprocurement/edge/tests/plans.py
|
kroman0/openprocurement.edge
|
e5ad9a3bbb64c10cf529e940ab0018061fcdc4f4
|
[
"Apache-2.0"
] | 9
|
2016-10-13T02:38:37.000Z
|
2018-08-15T09:12:00.000Z
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.edge.utils import get_now
from openprocurement.edge.tests.base import (
PlanBaseWebTest,
test_plan_data,
ROUTE_PREFIX
)
class PlanResourceTest(PlanBaseWebTest):
    def test_empty_listing(self):
        """Listing an empty DB: default/jsonp/pretty renderings, paging
        metadata, changes-feed offset validation, and identical behaviour
        with registry.update_after disabled."""
        response = self.app.get('/plans')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], [])
        # Compact JSON by default: no pretty indentation, no JSONP wrapper.
        self.assertNotIn('{\n "', response.body)
        self.assertNotIn('callback({', response.body)
        self.assertEqual(response.json['next_page']['offset'], '')
        self.assertNotIn('prev_page', response.json)
        # opt_jsonp wraps the payload in a JS callback.
        response = self.app.get('/plans?opt_jsonp=callback')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertNotIn('{\n "', response.body)
        self.assertIn('callback({', response.body)
        # opt_pretty switches to indented JSON.
        response = self.app.get('/plans?opt_pretty=1')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('{\n "', response.body)
        self.assertNotIn('callback({', response.body)
        # Both options combine.
        response = self.app.get('/plans?opt_jsonp=callback&opt_pretty=1')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertIn('{\n "', response.body)
        self.assertIn('callback({', response.body)
        # descending/limit must be echoed into next_page, flipped for prev_page.
        response = self.app.get('/plans?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], [])
        self.assertIn('descending=1', response.json['next_page']['uri'])
        self.assertIn('limit=10', response.json['next_page']['uri'])
        self.assertNotIn('descending=1', response.json['prev_page']['uri'])
        self.assertIn('limit=10', response.json['prev_page']['uri'])
        # The changes feed behaves like the default feed when empty.
        response = self.app.get('/plans?feed=changes')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], [])
        self.assertEqual(response.json['next_page']['offset'], '')
        self.assertNotIn('prev_page', response.json)
        # An invalid changes-feed offset is a 404 with a structured error.
        response = self.app.get('/plans?feed=changes&offset=0', status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
        ])
        response = self.app.get('/plans?feed=changes&descending=1&limit=10')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], [])
        self.assertIn('descending=1', response.json['next_page']['uri'])
        self.assertIn('limit=10', response.json['next_page']['uri'])
        self.assertNotIn('descending=1', response.json['prev_page']['uri'])
        self.assertIn('limit=10', response.json['prev_page']['uri'])
        # Same listing with deferred view updates disabled must look identical.
        self.app.app.registry.update_after = False
        response = self.app.get('/plans')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], [])
        self.assertNotIn('{\n "', response.body)
        self.assertNotIn('callback({', response.body)
        self.assertEqual(response.json['next_page']['offset'], '')
        self.assertNotIn('prev_page', response.json)
        self.app.app.registry.update_after = True
    def test_listing(self):
        """Create three plans, then verify dateModified ordering, offset and
        limit paging, opt_fields projection, descending order, and mode
        filtering on the default feed."""
        response = self.app.get('/plans')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 0)
        plans = []
        for i in range(3):
            # `offset` ends up as the timestamp just before the LAST plan.
            offset = get_now().isoformat()
            plans.append(self.create_plan())
        ids = ','.join([i['id'] for i in plans])
        # Poll until the (eventually-consistent) index exposes all three.
        while True:
            response = self.app.get('/plans')
            self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
            if len(response.json['data']) == 3:
                break
        self.assertEqual(len(response.json['data']), 3)
        self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
        self.assertEqual(set([i['id'] for i in response.json['data']]),
                         set([i['id'] for i in plans]))
        self.assertEqual(set([i['dateModified'] for i in response.json['data']]),
                         set([i['dateModified'] for i in plans]))
        self.assertEqual([i['dateModified'] for i in response.json['data']],
                         sorted([i['dateModified'] for i in plans]))
        # Offset skips plans modified before the captured timestamp.
        while True:
            response = self.app.get('/plans?offset={}'.format(offset))
            self.assertEqual(response.status, '200 OK')
            if len(response.json['data']) == 1:
                break
        self.assertEqual(len(response.json['data']), 1)
        # limit=2 pages: 2 items, then 1, then an empty page.
        response = self.app.get('/plans?limit=2')
        self.assertEqual(response.status, '200 OK')
        self.assertNotIn('prev_page', response.json)
        self.assertEqual(len(response.json['data']), 2)
        response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
        self.assertEqual(response.status, '200 OK')
        self.assertIn('descending=1', response.json['prev_page']['uri'])
        self.assertEqual(len(response.json['data']), 1)
        response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
        self.assertEqual(response.status, '200 OK')
        self.assertIn('descending=1', response.json['prev_page']['uri'])
        self.assertEqual(len(response.json['data']), 0)
        # opt_fields adds projected fields and is propagated to next_page.
        response = self.app.get('/plans', params=[('opt_fields', 'status')])
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 3)
        self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
        self.assertIn('opt_fields=status', response.json['next_page']['uri'])
        response = self.app.get('/plans', params=[('opt_fields', 'status,planID')])
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 3)
        self.assertEqual(set(response.json['data'][0]),
                         set([u'id', u'dateModified', u'status', u'planID']))
        self.assertIn('opt_fields=status%2CplanID', response.json['next_page']['uri'])
        # Descending listing reverses the dateModified order.
        response = self.app.get('/plans?descending=1')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(len(response.json['data']), 3)
        self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
        self.assertEqual(set([i['id'] for i in response.json['data']]),
                         set([i['id'] for i in plans]))
        self.assertEqual([i['dateModified'] for i in response.json['data']],
                         sorted([i['dateModified'] for i in plans], reverse=True))
        # Descending paging: 2 items, then 1, then an empty page.
        response = self.app.get('/plans?descending=1&limit=2')
        self.assertEqual(response.status, '200 OK')
        self.assertNotIn('descending=1', response.json['prev_page']['uri'])
        self.assertEqual(len(response.json['data']), 2)
        response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
        self.assertEqual(response.status, '200 OK')
        self.assertNotIn('descending=1', response.json['prev_page']['uri'])
        self.assertEqual(len(response.json['data']), 1)
        response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
        self.assertEqual(response.status, '200 OK')
        self.assertNotIn('descending=1', response.json['prev_page']['uri'])
        self.assertEqual(len(response.json['data']), 0)
        # mode=test shows only the test-mode plan; mode=_all_ shows all four.
        test_plan_data2 = test_plan_data.copy()
        test_plan_data2['mode'] = 'test'
        self.create_plan(test_plan_data2)
        while True:
            response = self.app.get('/plans?mode=test')
            self.assertEqual(response.status, '200 OK')
            if len(response.json['data']) == 1:
                break
        self.assertEqual(len(response.json['data']), 1)
        response = self.app.get('/plans?mode=_all_')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
    """Exercise the ``feed=changes`` listing: paging, opt_fields, ordering, modes."""
    # The changes feed starts out empty.
    resp = self.app.get('/plans?feed=changes')
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 0)

    created = []
    for _ in range(3):
        created.append(self.create_plan())
    joined_ids = ','.join([plan['id'] for plan in created])

    # Poll until the feed has caught up with all three plans.
    while True:
        resp = self.app.get('/plans?feed=changes')
        self.assertTrue(joined_ids.startswith(','.join([item['id'] for item in resp.json['data']])))
        if len(resp.json['data']) == 3:
            break

    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 3)
    self.assertEqual(set(resp.json['data'][0]), set([u'id', u'dateModified']))
    self.assertEqual(set([item['id'] for item in resp.json['data']]),
                     set([plan['id'] for plan in created]))
    self.assertEqual(set([item['dateModified'] for item in resp.json['data']]),
                     set([plan['dateModified'] for plan in created]))
    self.assertEqual([item['dateModified'] for item in resp.json['data']],
                     sorted([plan['dateModified'] for plan in created]))

    # Forward pagination over the feed.
    resp = self.app.get('/plans?feed=changes&limit=2')
    self.assertEqual(resp.status, '200 OK')
    self.assertNotIn('prev_page', resp.json)
    self.assertEqual(len(resp.json['data']), 2)
    resp = self.app.get(resp.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
    self.assertEqual(resp.status, '200 OK')
    self.assertIn('descending=1', resp.json['prev_page']['uri'])
    self.assertEqual(len(resp.json['data']), 1)
    resp = self.app.get(resp.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
    self.assertEqual(resp.status, '200 OK')
    self.assertIn('descending=1', resp.json['prev_page']['uri'])
    self.assertEqual(len(resp.json['data']), 0)

    # opt_fields widens the listed fields and survives paging links.
    resp = self.app.get('/plans?feed=changes', params=[('opt_fields', 'status')])
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 3)
    self.assertEqual(set(resp.json['data'][0]), set([u'id', u'dateModified', u'status']))
    self.assertIn('opt_fields=status', resp.json['next_page']['uri'])
    resp = self.app.get('/plans?feed=changes', params=[('opt_fields', 'status,planID')])
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 3)
    self.assertEqual(set(resp.json['data'][0]),
                     set([u'id', u'dateModified', u'status', u'planID']))
    self.assertIn('opt_fields=status%2CplanID', resp.json['next_page']['uri'])

    # Descending order reverses the dateModified sort.
    resp = self.app.get('/plans?feed=changes&descending=1')
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(resp.content_type, 'application/json')
    self.assertEqual(len(resp.json['data']), 3)
    self.assertEqual(set(resp.json['data'][0]), set([u'id', u'dateModified']))
    self.assertEqual(set([item['id'] for item in resp.json['data']]),
                     set([plan['id'] for plan in created]))
    self.assertEqual([item['dateModified'] for item in resp.json['data']],
                     sorted([plan['dateModified'] for plan in created], reverse=True))

    # Pagination while descending: prev_page links drop descending=1.
    resp = self.app.get('/plans?feed=changes&descending=1&limit=2')
    self.assertEqual(resp.status, '200 OK')
    self.assertNotIn('descending=1', resp.json['prev_page']['uri'])
    self.assertEqual(len(resp.json['data']), 2)
    resp = self.app.get(resp.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
    self.assertEqual(resp.status, '200 OK')
    self.assertNotIn('descending=1', resp.json['prev_page']['uri'])
    self.assertEqual(len(resp.json['data']), 1)
    resp = self.app.get(resp.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
    self.assertEqual(resp.status, '200 OK')
    self.assertNotIn('descending=1', resp.json['prev_page']['uri'])
    self.assertEqual(len(resp.json['data']), 0)

    # Plans in mode=test are hidden unless requested explicitly.
    test_mode_data = test_plan_data.copy()
    test_mode_data['mode'] = 'test'
    self.create_plan(test_mode_data)
    while True:
        resp = self.app.get('/plans?feed=changes&mode=test')
        self.assertEqual(resp.status, '200 OK')
        if len(resp.json['data']) == 1:
            break
    self.assertEqual(len(resp.json['data']), 1)
    resp = self.app.get('/plans?feed=changes&mode=_all_')
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 4)
def test_listing_draft(self):
    """Draft plans appear in the default listing with the summary fields only."""
    # Nothing listed before any plan exists.
    resp = self.app.get('/plans')
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 0)

    # Create three plans in draft status.
    draft = test_plan_data.copy()
    draft.update({'status': 'draft'})
    created = [self.create_plan(draft) for _ in range(3)]
    joined_ids = ','.join([plan['id'] for plan in created])

    # Poll until the listing reflects all three drafts.
    while True:
        resp = self.app.get('/plans')
        self.assertTrue(joined_ids.startswith(','.join([item['id'] for item in resp.json['data']])))
        if len(resp.json['data']) == 3:
            break

    self.assertEqual(len(resp.json['data']), 3)
    self.assertEqual(set(resp.json['data'][0]), set([u'id', u'dateModified']))
    self.assertEqual(set([item['id'] for item in resp.json['data']]),
                     set([plan['id'] for plan in created]))
    self.assertEqual(set([item['dateModified'] for item in resp.json['data']]),
                     set([plan['dateModified'] for plan in created]))
    self.assertEqual([item['dateModified'] for item in resp.json['data']],
                     sorted([plan['dateModified'] for plan in created]))
def suite():
    """Assemble the default test suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(PlanResourceTest))
    return tests
# Script entry point: run this module's default test suite.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 46.728395
| 99
| 0.61004
| 1,831
| 15,140
| 4.979792
| 0.065538
| 0.177671
| 0.11406
| 0.075016
| 0.934415
| 0.934415
| 0.923777
| 0.918403
| 0.910945
| 0.903488
| 0
| 0.018808
| 0.213342
| 15,140
| 323
| 100
| 46.873065
| 0.746767
| 0.001387
| 0
| 0.80916
| 0
| 0.003817
| 0.179335
| 0.031355
| 0
| 0
| 0
| 0
| 0.564886
| 1
| 0.019084
| false
| 0
| 0.01145
| 0
| 0.038168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4f01cfe0d8479fca6a871288aa3eea79202452c9
| 118
|
py
|
Python
|
platform/hwconf_data/efr32zg13p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | null | null | null |
platform/hwconf_data/efr32zg13p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T02:36:22.000Z
|
2020-08-25T02:36:22.000Z
|
platform/hwconf_data/efr32zg13p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T01:56:04.000Z
|
2020-08-25T01:56:04.000Z
|
from efr32zg13p.halconfig import halconfig_types as types
from efr32zg13p.halconfig import halconfig_dependency as dep
| 59
| 60
| 0.889831
| 16
| 118
| 6.4375
| 0.5
| 0.271845
| 0.446602
| 0.563107
| 0.737864
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 0.09322
| 118
| 2
| 60
| 59
| 0.88785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
358baa19065ff3756546625366270eb70628df22
| 32,788
|
py
|
Python
|
shop.py
|
Topaz1618/MeowShop
|
024cf75a9895bed6fee19a1a18712023134ae132
|
[
"MIT"
] | null | null | null |
shop.py
|
Topaz1618/MeowShop
|
024cf75a9895bed6fee19a1a18712023134ae132
|
[
"MIT"
] | null | null | null |
shop.py
|
Topaz1618/MeowShop
|
024cf75a9895bed6fee19a1a18712023134ae132
|
[
"MIT"
] | null | null | null |
import json
import jwt
import math
import asyncio
from time import time, sleep
from base import BaseHandler
from code import TokenError, AuthError, ShopError, BaseError, DBError, PayError
from config import SECRET_KEY, PRODUCT_PAGE_LIMIT, MYHEART_PAGE_LIMIT, MYITEMS_PAGE_LIMIT, PACKAGE_LIST, PAGE_LIMIT, ZIP_LIMIT
from shop_enum import GoodsType
from shop_utils import auth_login_redirect, member_login_redirect, get_token_user, get_discount_price, get_discount, add_num, subtract_num, \
async_member_login_redirect, admin_login_redirect, async_auth_login_redirect, producer_login_redirect
from base_extensions import get_user_id
from order_extensions import check_is_member, get_personal_items # Todo: 待修改
from shop_extensions import get_resource_total_counts, generate_feature_items, generate_zip_items, get_product_dict, \
paging_goods_list, slice_product_data, get_myheart_list, add_my_heart, delete_my_heart, count_goods_type, get_my_heart_num, \
get_page_info, get_myitems_num, get_myitems_list, check_is_bought, get_total_num, get_user_info, get_goods_name, generate_component_list, \
get_component_total_counts
def generate_start_end_point(page_num, page_limit, total):
    """Compute the [start, end) slice bounds for one page of a listing.

    :param page_num: 1-based page number, or ``None`` for the first page.
    :param page_limit: items per page, or ``None`` to return everything.
    :param total: total number of items available.
    :returns: ``(start, end)`` indices with ``end`` clamped to ``total``.
    :raises ShopError: code "7008" when the inputs cannot be paged
        (e.g. non-numeric arguments).
    """
    try:
        current_page = page_num if page_num is not None else 1
        page_limit = page_limit if page_limit is not None else total
        start = (current_page - 1) * page_limit
        # Clamp the page end to the last available item.
        end = min(page_limit * current_page, total)
        return start, end
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise ShopError("7008") from e
class CheckIsMemberHandler(BaseHandler):
    """Report whether the logged-in user currently holds a membership."""

    @auth_login_redirect
    def get(self):
        """Write {'msg': <bool>, 'error_code': '1000'} for the current user."""
        try:
            cookie_token = self.get_secure_cookie("token")
            header_token = self.get_argument("Authorization", None)
            user = get_token_user(cookie_token, header_token)
            message = {'msg': check_is_member(user), 'error_code': '1000'}
        except (AuthError, DBError) as e:
            # Domain errors carry their own message/code pair.
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class UserInfoHandler(BaseHandler):
    """User-info endpoints (currently used to show the membership expiry date)."""

    @auth_login_redirect
    def get(self):
        """Render the user-info page for the authenticated user."""
        cookie_token = self.get_secure_cookie("token")
        header_token = self.get_argument("Authorization", None)
        # username = self.get_argument("username", None)
        user = get_token_user(cookie_token, header_token)
        self.render("user_info.html", data=get_user_info(user), username=user)

    @auth_login_redirect
    def post(self):
        """JSON variant of the user-info lookup."""
        try:
            cookie_token = self.get_secure_cookie("token")
            header_token = self.get_argument("Authorization", None)
            user = get_token_user(cookie_token, header_token)
            message = {'msg': get_user_info(user), 'error_code': '1000'}
        except Exception as e:
            message = {'msg': " Unknow error", 'error_code': '1010'}
        self.write(message)
class AboutUsHandler(BaseHandler):
    """Serve the static "about us" page."""

    def get(self):
        self.render("about_us.html")
class FeedbackHandler(BaseHandler):
    """Serve the static feedback page."""

    def get(self):
        self.render("feedback.html")
class CheckIsBoughtHandler(BaseHandler):
    """Check whether the current user has already bought a given product."""

    @async_auth_login_redirect
    async def post(self):
        """Write {'msg': <is_bought>, 'error_code': '1000'} for ``goods_id``.

        Resolves the user from the secure cookie / Authorization argument,
        then asks the shop layer whether that user already owns the goods.
        """
        try:
            goods_id = self.get_argument("goods_id", None)
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            is_bought = check_is_bought(uid, goods_id)
            message = {'msg': is_bought, 'error_code': "1000"}
        except (BaseError, TokenError, AuthError, ShopError) as e:
            # All domain errors carry their own message/code pair.
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class NotifyPurchasedHandler(BaseHandler):
    """Page telling the user that a product has already been purchased."""

    def get(self):
        """Render the purchased-notification page for ``goods_id``.

        Falls back to the generic error page when the goods id is missing
        or the product lookup fails.
        """
        try:
            info = self.get_argument("info", None)
            back_button = self.get_argument("btn", None)
            goods_id = self.get_argument("goods_id", None)
            if goods_id is None:
                # Without a goods id there is nothing to look up.
                return self.render("error_page.html", error_message="未知商品 ID")
            goods_name = get_goods_name(goods_id)
            data = get_product_dict(goods_name)
            return self.render("notify_purchased.html", error_message=info, btn=back_button, data=data)
        except DBError as e:
            return self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            return self.render("error_page.html", error_message="Unknow Error")
class PayPageHandler(BaseHandler):
    """Render the payment page shared by all payment methods."""

    @auth_login_redirect
    def get(self):
        print("data")
        # is_recharge is treated as a flag: present means True.
        is_recharge = self.get_argument("is_recharge", None) is not None
        data = {
            "goods_name": self.get_argument("goods_name", None),
            "goods_price": self.get_argument("goods_price", None),
            "is_recharge": is_recharge,
            "goods_id": self.get_argument("goods_id", None),
        }
        cookie_token = self.get_secure_cookie("token")
        header_token = self.get_argument("Authorization", None)
        user = get_token_user(cookie_token, header_token)
        self.render("pay.html", data=data, is_recharge=is_recharge, username=user)
class PackageListHandler(BaseHandler):
    """Membership package catalogue (HTML page + JSON API)."""

    # @auth_login_redirect
    def get(self):
        print(PACKAGE_LIST)
        cookie_token = self.get_secure_cookie("token")
        header_token = self.get_argument("Authorization", None)
        # Anonymous visitors may view the catalogue; resolve the user only
        # when a session cookie is present.
        user = get_token_user(cookie_token, header_token) if cookie_token is not None else None
        self.render("package_catalog.html", data=PACKAGE_LIST, username=user,
                    get_discount=get_discount, get_discount_price=get_discount_price)

    def post(self):
        """Return the package list serialized as JSON."""
        try:
            message = {'msg': json.dumps(PACKAGE_LIST), 'error_code': '1000'}
        except Exception as e:
            message = {'msg': "Unknow error", 'error_code': '1010'}
        self.write(message)
class FeatureListHandler(BaseHandler):
    """List of all feature products (producer-facing page + member JSON API)."""
    @producer_login_redirect
    def get(self):
        # Renders the paginated feature-items page for the logged-in producer.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            page_num = self.get_argument("page", None)
            if isinstance(page_num, str):
                page_num = int(page_num)
            total = get_resource_total_counts(GoodsType.FEATURE)
            total_page = math.ceil(total / ZIP_LIMIT)
            current_page = page_num if page_num is not None else 1
            start = (current_page - 1) * ZIP_LIMIT
            # Clamp the end of the current page to the total item count.
            end = total if ZIP_LIMIT * current_page > total else ZIP_LIMIT * current_page
            page_info = {
                "start": start,
                "end": end,
                "limit": ZIP_LIMIT,
                "total_data": total,
                "current_page": current_page,
                "total_page": total_page,
            }
            data_list = generate_feature_items(uid, start, end)
            # data = json.dumps(data_list, ensure_ascii=False)
            # message = {'msg': data, 'error_code': '1000'}
            # print(data_list)
            self.render("feature_items.html", page_info=page_info, data=data_list, username=username, subtract=subtract_num, add=add_num)
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")

    @member_login_redirect
    def post(self):
        # JSON API: returns one page of feature items for a member,
        # authenticated via the Authorization JWT argument.
        try:
            token = self.get_argument("Authorization", None)
            if token is None:
                raise TokenError("5000")
            token_dic = jwt.decode(token.encode(), SECRET_KEY)
            username = token_dic.get('phonenum')
            uid = get_user_id(username)
            page_num = self.get_argument("page_num", None)
            page_limit = self.get_argument("page_limit", None)
            if isinstance(page_num, str):
                page_num = int(page_num)
            if isinstance(page_limit, str):
                page_limit = int(page_limit)
            total = get_resource_total_counts(GoodsType.FEATURE)
            print("Data count: ", total)
            start, end = generate_start_end_point(page_num, page_limit, total)
            data_list = generate_feature_items(uid, start, end)
            data = json.dumps(data_list, ensure_ascii=False)
            message = {'msg': data, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            print("raise error. ", e)
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class ZipListHandler(BaseHandler):
    """List of all zip products (producer-facing page + member JSON API)."""
    @producer_login_redirect
    def get(self):
        # Renders the paginated zip-items page (admin view) for a producer.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            page_num = self.get_argument("page", None)
            # page_limit = self.get_argument("page_limit", None)
            if isinstance(page_num, str):
                page_num = int(page_num)
            # if isinstance(page_limit, str):
            #     page_limit = int(page_limit)
            total = get_resource_total_counts(GoodsType.ZIP)
            total_page = math.ceil(total / ZIP_LIMIT)
            current_page = page_num if page_num is not None else 1
            start = (current_page - 1) * ZIP_LIMIT
            # Clamp the end of the current page to the total item count.
            end = total if ZIP_LIMIT * current_page > total else ZIP_LIMIT * current_page
            # start, end = generate_start_end_point(page_num, PAGE_LIMIT, total)
            page_info = {
                "start": start,
                "end": end,
                "limit": ZIP_LIMIT,
                "total_data": total,
                "current_page": current_page,
                "total_page": total_page,
            }
            print("Data count: ", total)
            data_list = generate_zip_items(uid, start, end, is_admin=True)
            self.render("zip_items.html", page_info=page_info, data=data_list, username=username, subtract=subtract_num, add=add_num)
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")

    @member_login_redirect
    def post(self):
        # JSON API: returns one page of the member's zip items,
        # authenticated via the Authorization JWT argument.
        try:
            token = self.get_argument("Authorization", None)
            if token is None:
                raise TokenError("5000")
            token_dic = jwt.decode(token.encode(), SECRET_KEY)
            username = token_dic.get('phonenum')
            uid = get_user_id(username)
            print(uid)
            page_num = self.get_argument("page_num", None)
            page_limit = self.get_argument("page_limit", None)
            if isinstance(page_num, str):
                page_num = int(page_num)
            if isinstance(page_limit, str):
                page_limit = int(page_limit)
            # NOTE: unlike get(), the total here is scoped to this user's uid.
            total = get_resource_total_counts(GoodsType.ZIP, uid)
            print("Data count: ", total)
            start, end = generate_start_end_point(page_num, page_limit, total)
            data_list = generate_zip_items(uid, start, end)
            data = json.dumps(data_list, ensure_ascii=False)
            print(data)
            message = {'msg': data, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            print("raise error. ", e)
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class ComponentListHandler(BaseHandler):
    # JSON API: paginated component listing for a given menu.
    # @async_member_login_redirect
    def get(self):
        """Return one page of components for ``menu``; error 1002 when menu is missing."""
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            menu = self.get_argument("menu", None)
            if menu is None:
                # menu is a required argument for this endpoint.
                raise BaseError("1002")
            page_num = self.get_argument("page_num", None)
            page_limit = self.get_argument("page_limit", None)
            if isinstance(page_num, str):
                page_num = int(page_num)
            if isinstance(page_limit, str):
                page_limit = int(page_limit)
            total = get_component_total_counts(menu)
            start, end = generate_start_end_point(page_num, page_limit, total)
            data = generate_component_list(uid, menu, start, end)
            print(data)
            message = {'msg': data, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            print("raise error. ", e)
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class CountGoodsHandler(BaseHandler):
    """Return the per-type goods counts."""

    def post(self):
        self.write(count_goods_type())
class StoreCatalogHandler(BaseHandler):
    """Store catalogue page listing all products."""
    # @async_member_login_redirect
    async def get(self):
        # Renders the catalogue page. Anonymous visitors are allowed
        # (username/uid stay None when no session cookie is present).
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            print("!!!! ",cookie_token)
            if cookie_token is not None:
                username = get_token_user(cookie_token, token)
                uid = get_user_id(username)
            else:
                username = uid = None
            page = self.get_argument("page", None)
            is_sort = self.get_argument("sort", None)
            filter_list = self.get_argument("filter", None)
            if isinstance(page, str):
                page = int(page)
            current_page = page if page is not None else 1
            is_sort = is_sort if is_sort is not None else "0"
            # "filter" is a space-separated list; default shows everything.
            filter_list = filter_list.split(" ") if filter_list is not None else ["All"]
            t0 = time()
            loop = asyncio.get_event_loop()
            # Run the two blocking lookups concurrently in executor threads.
            group1 = await asyncio.gather(*[
                loop.run_in_executor(None, count_goods_type, ),  # count goods per type
                loop.run_in_executor(None, get_page_info, current_page, filter_list, is_sort, uid),  # fetch the current page's data
                # loop.run_in_executor(None, get_myheart_list, uid, True)  # fetch the favourites list
            ])
            goods_count_list, page_info = group1
            print("All Step2: ", time(), time() - t0)
            self.render("store_catalog.html", username=username, get_discount_price=get_discount_price,
                        page_info=page_info, add=add_num, subtract=subtract_num, goods_count_list=goods_count_list)
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except DBError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")

    # @member_login_redirect
    def post(self):
        # JSON API: returns one sorted/filtered page of the goods list.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            is_sort = self.get_argument("sort", None)
            filter_list = self.get_argument("filter", None)
            page_num = self.get_argument("page_num", None)
            page_limit = self.get_argument("page_limit", None)
            uid = get_user_id(username)
            if isinstance(page_num, str):
                page_num = int(page_num)
            if isinstance(page_limit, str):
                page_limit = int(page_limit)
            filter_list = filter_list.split(" ") if filter_list is not None else []
            is_filter = False if len(filter_list) == 0 else True
            is_sort = is_sort if is_sort is not None else "0"
            page_limit = PRODUCT_PAGE_LIMIT if page_limit is None else page_limit
            # The total differs depending on whether a filter is active.
            if is_filter:
                total = paging_goods_list(filter_list)
            else:
                total = get_total_num()
            start, end = generate_start_end_point(page_num, page_limit, total)
            goods_list = slice_product_data(start, end, is_sort, filter_list, uid, is_filter)
            message = {'msg': goods_list, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except DBError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            print("raise error. ", e)
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class SingleProductHandler(BaseHandler):
    """Product detail page."""
    @member_login_redirect
    def get(self):
        # Renders the detail page for one product, identified by goods_name.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            goods_name = self.get_argument("goods_name", None)
            goods_id = self.get_argument("goods_id", None)
            goods_price = self.get_argument("goods_price", None)
            base_data = {
                "goods_name": goods_name,
                "goods_id": goods_id,
                "goods_price": goods_price,
            }
            product_list = get_product_dict(goods_name, uid)
            # Only the first matching product is shown.
            data = product_list[0]
            # print(data)
            self.render("single_product.html", data=data, base_data=base_data, username=username, get_discount_price=get_discount_price)
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except DBError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")

    def post(self):
        # JSON API: returns every product record matching goods_name.
        try:
            goods_name = self.get_argument("goods_name", None)
            print(goods_name)
            product_list = get_product_dict(goods_name)
            # data = product_list[0]
            # print(product_list)
            message = {'msg': product_list, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except DBError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class AddMyHeartHandler(BaseHandler):
    """Add a product to the current user's favourites ("my heart") list."""

    def post(self):
        """Decode the session cookie, then record ``goods_id`` as a favourite.

        Writes {'msg': 'successful', 'error_code': '1000'} on success, or the
        domain error's message/code pair on failure.
        """
        try:
            cookie_token = self.get_secure_cookie("token")
            # Auth is cookie-based here; the Authorization argument is unused.
            if cookie_token is None:
                raise TokenError("5000")
            token = cookie_token
            if isinstance(token, bytes):
                token = token.decode()
            token_dic = jwt.decode(token.encode(), SECRET_KEY)
            username = token_dic.get('phonenum')
            goods_id = self.get_argument("goods_id")
            add_my_heart(username, goods_id)
            message = {'msg': "successful", 'error_code': '1000'}
        except (BaseError, TokenError, AuthError, ShopError) as e:
            # Domain errors carry their own message/code pair.
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class DeleteMyHeartHandler(BaseHandler):
    """Remove a product from the current user's favourites ("my heart") list."""

    def post(self):
        """Decode the session cookie, then delete ``goods_id`` from favourites.

        Writes {'msg': 'successful', 'error_code': '1000'} on success, or the
        domain error's message/code pair on failure.
        """
        try:
            cookie_token = self.get_secure_cookie("token")
            # Auth is cookie-based here; the Authorization argument is unused.
            if cookie_token is None:
                raise TokenError("5000")
            token = cookie_token
            if isinstance(token, bytes):
                token = token.decode()
            token_dic = jwt.decode(token.encode(), SECRET_KEY)
            username = token_dic.get('phonenum')
            goods_id = self.get_argument("goods_id")
            delete_my_heart(username, goods_id)
            message = {'msg': "successful", 'error_code': '1000'}
        except (BaseError, TokenError, AuthError, ShopError) as e:
            # Domain errors carry their own message/code pair.
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class MyHeartItemsHandler(BaseHandler):
    """Favourites ("my heart") items: HTML page + JSON preview API."""
    @auth_login_redirect
    def get(self):
        # Renders the paginated favourites page for the logged-in user.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            page = self.get_argument("page", None)
            if isinstance(page, str):
                page = int(page)
            uid = get_user_id(username)
            current_page = page if page is not None else 1
            total_data = get_my_heart_num(uid)
            total_page = total_data / MYHEART_PAGE_LIMIT
            total_page = math.ceil(total_page)
            start = (current_page - 1) * MYHEART_PAGE_LIMIT
            # Clamp the end of the current page to the total item count.
            end = total_data if MYHEART_PAGE_LIMIT * current_page > total_data else MYHEART_PAGE_LIMIT * current_page
            print("!!!!!!! ", total_data, total_page, start, end)
            data = get_myheart_list(uid, start, end)
            # goods_list = slice_product_data(start, end)
            page_info = {
                "start": start,
                "end": end,
                "limit": MYHEART_PAGE_LIMIT,
                "total_data": total_data,
                "current_page": current_page,
                "total_page": total_page,
            }
            self.render("my_heart.html", username=username, data=data, page_info=page_info, add=add_num, subtract=subtract_num, get_discount_price=get_discount_price )
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except DBError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")

    def post(self):
        # JSON API: returns the first three favourites, or an empty list
        # when no session cookie is present.
        try:
            cookie_token = self.get_secure_cookie("token")
            if cookie_token is None:
                data = list()
            else:
                token = cookie_token
                if isinstance(token, bytes):
                    token = token.decode()
                token_dic = jwt.decode(token.encode(), SECRET_KEY)
                username = token_dic.get('phonenum')
                uid = get_user_id(username)
                start = 0
                end = 3
                data = get_myheart_list(uid, start, end)
            message = {'msg': data, 'error_code': '1000'}
        except BaseError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except TokenError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except DBError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except ShopError as e:
            print("raise error. ", e)
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
class MyItemsHandler(BaseHandler):
    """All items of the current user: purchased items and the user's own uploads."""
    @auth_login_redirect
    def get(self):
        # Renders the paginated "my items" page for the logged-in user.
        try:
            cookie_token = self.get_secure_cookie("token")
            token = self.get_argument("Authorization", None)
            username = get_token_user(cookie_token, token)
            uid = get_user_id(username)
            page = self.get_argument("page", None)
            if isinstance(page, str):
                page = int(page)
            current_page = page if page is not None else 1
            total_data = get_myitems_num(uid)
            total_page = total_data / MYITEMS_PAGE_LIMIT
            total_page = math.ceil(total_page)
            start = (current_page - 1) * MYITEMS_PAGE_LIMIT
            # Clamp the end of the current page to the total item count.
            end = total_data if MYITEMS_PAGE_LIMIT * current_page > total_data else MYITEMS_PAGE_LIMIT * current_page
            print("!!!!!!! ", total_data, total_page, start, end)
            data = get_myitems_list(uid, start, end)
            # goods_list = slice_product_data(start, end)
            page_info = {
                "start": start,
                "end": end,
                "limit": MYITEMS_PAGE_LIMIT,
                "total_data": total_data,
                "current_page": current_page,
                "total_page": total_page,
            }
            self.render("my_items.html", username=username, data=data, page_info=page_info, add=add_num, subtract=subtract_num, get_discount_price=get_discount_price)
        except BaseError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except TokenError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except AuthError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except DBError as e:
            self.render("error_page.html", error_message=e.error_msg)
        except ShopError as e:
            print("raise error. ", e)
            self.render("error_page.html", error_message=e.error_msg)
        except Exception as e:
            print(e)
            self.render("error_page.html", error_message="Unknow Error")
class AllProductsHandler(BaseHandler):
    """All personal items endpoint (deprecated, pending removal)."""
    @auth_login_redirect
    def post(self):
        # Returns every personal item for the user named in the
        # Authorization JWT argument, serialized as JSON.
        try:
            token = self.get_argument("Authorization", None)
            token_dic = jwt.decode(token.encode(), SECRET_KEY)
            username = token_dic.get('phonenum')
            # username = "15600803270"
            all_personal_items = get_personal_items(username)
            data = json.dumps(all_personal_items, ensure_ascii=False)
            print(data)
            message = {'msg': data, 'error_code': '1000'}
        except AuthError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except DBError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except PayError as e:
            message = {'msg': e.error_msg, 'error_code': e.error_code}
        except Exception as e:
            print(e)
            message = {'msg': "Unknow Error", 'error_code': '1010'}
        self.write(message)
| 34.368973
| 167
| 0.591924
| 3,964
| 32,788
| 4.631937
| 0.051968
| 0.038233
| 0.035782
| 0.028757
| 0.807255
| 0.785524
| 0.762976
| 0.745275
| 0.735744
| 0.716301
| 0
| 0.007309
| 0.30316
| 32,788
| 953
| 168
| 34.405037
| 0.796306
| 0.029767
| 0
| 0.780967
| 0
| 0
| 0.101656
| 0.000663
| 0
| 0
| 0
| 0.001049
| 0
| 1
| 0.037764
| false
| 0
| 0.019637
| 0
| 0.093656
| 0.07855
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35c6f2e2a7d35d32d0cde807c1a0e921cbe60feb
| 306
|
py
|
Python
|
temboo/core/Library/LittleSis/List/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/LittleSis/List/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/LittleSis/List/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.LittleSis.List.GetList import GetList, GetListInputSet, GetListResultSet, GetListChoreographyExecution
from temboo.Library.LittleSis.List.ListSearchByKeyword import ListSearchByKeyword, ListSearchByKeywordInputSet, ListSearchByKeywordResultSet, ListSearchByKeywordChoreographyExecution
| 102
| 182
| 0.908497
| 22
| 306
| 12.636364
| 0.636364
| 0.071942
| 0.122302
| 0.18705
| 0.215827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045752
| 306
| 2
| 183
| 153
| 0.952055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ea3e3f9e2c861cee4fefc445a36cbb63bdbf1beb
| 132
|
py
|
Python
|
services/apis/django-api/source/api/api_admin_ui/admin/__init__.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | 4
|
2021-09-10T09:46:18.000Z
|
2021-12-05T17:55:14.000Z
|
services/apis/django-api/source/api/api_admin_ui/admin/__init__.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
services/apis/django-api/source/api/api_admin_ui/admin/__init__.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
"""
"""
from api_admin_ui.admin import connector_admin, datapoint_admin, rest_admin
from api_admin_ui.admin import controller_admin
| 26.4
| 75
| 0.833333
| 20
| 132
| 5.1
| 0.45
| 0.137255
| 0.235294
| 0.27451
| 0.490196
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 132
| 4
| 76
| 33
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.